/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 * Permission is hereby granted for the distribution of this firmware
 * data in hexadecimal or equivalent format, provided this copyright
 * notice is accompanying it.
 */
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
/* Compile in VLAN tag support only when the 802.1Q layer is available. */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1
/* Driver identification and default operating-mode constants. */
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.92"
#define DRV_MODULE_RELDATE	"May 2, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg bitmask used when the tg3_debug parameter is -1. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
/* Byte sizes of the descriptor rings, derived from the entry counts above. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* TX ring size is a power of two, so wrap with a mask instead of modulo. */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
/* RX buffer sizes: max frame payload + alignment offset + slack. */
#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

/* number of entries in ethtool_test_keys[] below */
#define TG3_NUM_TEST 6
/* Banner printed at probe time; discarded after driver init (__devinitdata). */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Debug message bitmask; -1 means "use TG3_DEF_MSG_ENABLE". */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
147 static struct pci_device_id tg3_pci_tbl[] = {
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
218 static const struct {
219 const char string[ETH_GSTRING_LEN];
220 } ethtool_stats_keys[TG3_NUM_STATS] = {
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
253 { "tx_flow_control" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
286 { "rx_threshold_hit" },
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
299 static const struct {
300 const char string[ETH_GSTRING_LEN];
301 } ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
312 writel(val, tp->regs + off);
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
317 return (readl(tp->regs + off));
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
322 writel(val, tp->aperegs + off);
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
327 return (readl(tp->aperegs + off));
330 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
334 spin_lock_irqsave(&tp->indirect_lock, flags);
335 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
336 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
337 spin_unlock_irqrestore(&tp->indirect_lock, flags);
340 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
342 writel(val, tp->regs + off);
343 readl(tp->regs + off);
346 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
351 spin_lock_irqsave(&tp->indirect_lock, flags);
352 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
353 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
354 spin_unlock_irqrestore(&tp->indirect_lock, flags);
358 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
362 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
363 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
364 TG3_64BIT_REG_LOW, val);
367 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
368 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
369 TG3_64BIT_REG_LOW, val);
373 spin_lock_irqsave(&tp->indirect_lock, flags);
374 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
375 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
376 spin_unlock_irqrestore(&tp->indirect_lock, flags);
378 /* In indirect mode when disabling interrupts, we also need
379 * to clear the interrupt bit in the GRC local ctrl register.
381 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
383 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
384 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
388 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
393 spin_lock_irqsave(&tp->indirect_lock, flags);
394 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
395 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
396 spin_unlock_irqrestore(&tp->indirect_lock, flags);
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401 * where it is unsafe to read back the register without some delay.
402 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
405 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
407 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
408 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
409 /* Non-posted methods */
410 tp->write32(tp, off, val);
413 tg3_write32(tp, off, val);
418 /* Wait again after the read for the posted method to guarantee that
419 * the wait time is met.
425 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
427 tp->write32_mbox(tp, off, val);
428 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
429 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430 tp->read32_mbox(tp, off);
433 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
435 void __iomem *mbox = tp->regs + off;
437 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
439 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
445 return (readl(tp->regs + off + GRCMBOX_BASE));
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
450 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Register access shorthands; these expand against a local 'tp' variable.
 * The *_f variants flush the posted write (and optionally wait).
 */
#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)

#define tw32(reg,val) tp->write32(tp, reg, val)
#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg) tp->read32(tp, reg)
464 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
468 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
469 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
472 spin_lock_irqsave(&tp->indirect_lock, flags);
473 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
477 /* Always leave this as zero. */
478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
480 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
481 tw32_f(TG3PCI_MEM_WIN_DATA, val);
483 /* Always leave this as zero. */
484 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
486 spin_unlock_irqrestore(&tp->indirect_lock, flags);
489 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 *val = tr32(TG3PCI_MEM_WIN_DATA);
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 static void tg3_ape_lock_init(struct tg3 *tp)
520 /* Make sure the driver hasn't any stale locks. */
521 for (i = 0; i < 8; i++)
522 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523 APE_LOCK_GRANT_DRIVER);
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
532 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
536 case TG3_APE_LOCK_MEM:
544 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i = 0; i < 100; i++) {
548 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549 if (status == APE_LOCK_GRANT_DRIVER)
554 if (status != APE_LOCK_GRANT_DRIVER) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557 APE_LOCK_GRANT_DRIVER);
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
569 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
573 case TG3_APE_LOCK_MEM:
580 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
583 static void tg3_disable_ints(struct tg3 *tp)
585 tw32(TG3PCI_MISC_HOST_CTRL,
586 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
587 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
590 static inline void tg3_cond_int(struct tg3 *tp)
592 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
593 (tp->hw_status->status & SD_STATUS_UPDATED))
594 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
596 tw32(HOSTCC_MODE, tp->coalesce_mode |
597 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
600 static void tg3_enable_ints(struct tg3 *tp)
605 tw32(TG3PCI_MISC_HOST_CTRL,
606 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
608 (tp->last_tag << 24));
609 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
611 (tp->last_tag << 24));
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
617 struct tg3_hw_status *sblk = tp->hw_status;
618 unsigned int work_exists = 0;
620 /* check for phy events */
621 if (!(tp->tg3_flags &
622 (TG3_FLAG_USE_LINKCHG_REG |
623 TG3_FLAG_POLL_SERDES))) {
624 if (sblk->status & SD_STATUS_LINK_CHG)
627 /* check for RX/TX work to do */
628 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
638 * which reenables interrupts
640 static void tg3_restart_ints(struct tg3 *tp)
642 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
650 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
652 tw32(HOSTCC_MODE, tp->coalesce_mode |
653 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
656 static inline void tg3_netif_stop(struct tg3 *tp)
658 tp->dev->trans_start = jiffies; /* prevent tx timeout */
659 napi_disable(&tp->napi);
660 netif_tx_disable(tp->dev);
663 static inline void tg3_netif_start(struct tg3 *tp)
665 netif_wake_queue(tp->dev);
666 /* NOTE: unconditional netif_wake_queue is only appropriate
667 * so long as all callers are assured to have free tx slots
668 * (such as after tg3_init_hw)
670 napi_enable(&tp->napi);
671 tp->hw_status->status |= SD_STATUS_UPDATED;
675 static void tg3_switch_clocks(struct tg3 *tp)
677 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
680 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
681 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
684 orig_clock_ctrl = clock_ctrl;
685 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
686 CLOCK_CTRL_CLKRUN_OENABLE |
688 tp->pci_clock_ctrl = clock_ctrl;
690 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
691 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
692 tw32_wait_f(TG3PCI_CLOCK_CTRL,
693 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
695 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
696 tw32_wait_f(TG3PCI_CLOCK_CTRL,
698 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
700 tw32_wait_f(TG3PCI_CLOCK_CTRL,
701 clock_ctrl | (CLOCK_CTRL_ALTCLK),
704 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
707 #define PHY_BUSY_LOOPS 5000
709 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
715 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
717 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
723 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
724 MI_COM_PHY_ADDR_MASK);
725 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
726 MI_COM_REG_ADDR_MASK);
727 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
729 tw32_f(MAC_MI_COM, frame_val);
731 loops = PHY_BUSY_LOOPS;
734 frame_val = tr32(MAC_MI_COM);
736 if ((frame_val & MI_COM_BUSY) == 0) {
738 frame_val = tr32(MAC_MI_COM);
746 *val = frame_val & MI_COM_DATA_MASK;
750 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
751 tw32_f(MAC_MI_MODE, tp->mi_mode);
758 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
764 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
765 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
768 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
770 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
774 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
775 MI_COM_PHY_ADDR_MASK);
776 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
777 MI_COM_REG_ADDR_MASK);
778 frame_val |= (val & MI_COM_DATA_MASK);
779 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
781 tw32_f(MAC_MI_COM, frame_val);
783 loops = PHY_BUSY_LOOPS;
786 frame_val = tr32(MAC_MI_COM);
787 if ((frame_val & MI_COM_BUSY) == 0) {
789 frame_val = tr32(MAC_MI_COM);
799 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
800 tw32_f(MAC_MI_MODE, tp->mi_mode);
807 static int tg3_bmcr_reset(struct tg3 *tp)
812 /* OK, reset it, and poll the BMCR_RESET bit until it
813 * clears or we time out.
815 phy_control = BMCR_RESET;
816 err = tg3_writephy(tp, MII_BMCR, phy_control);
822 err = tg3_readphy(tp, MII_BMCR, &phy_control);
826 if ((phy_control & BMCR_RESET) == 0) {
/* tp->lock is held. */
/* Spin until the RX CPU firmware acknowledges (clears) the previous
 * driver event, or the iteration budget runs out.
 * NOTE(review): loop tail (per-iteration delay) not visible here —
 * confirm the bound really corresponds to the stated 2.5 ms.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	/* Wait for up to 2.5 milliseconds */
	for (i = 0; i < 250000; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
851 /* tp->lock is held. */
852 static void tg3_ump_link_report(struct tg3 *tp)
857 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
858 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
861 tg3_wait_for_event_ack(tp);
863 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
865 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
868 if (!tg3_readphy(tp, MII_BMCR, ®))
870 if (!tg3_readphy(tp, MII_BMSR, ®))
871 val |= (reg & 0xffff);
872 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
875 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
877 if (!tg3_readphy(tp, MII_LPA, ®))
878 val |= (reg & 0xffff);
879 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
882 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
883 if (!tg3_readphy(tp, MII_CTRL1000, ®))
885 if (!tg3_readphy(tp, MII_STAT1000, ®))
886 val |= (reg & 0xffff);
888 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
890 if (!tg3_readphy(tp, MII_PHYADDR, ®))
894 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
896 val = tr32(GRC_RX_CPU_EVENT);
897 val |= GRC_RX_CPU_DRIVER_EVENT;
898 tw32_f(GRC_RX_CPU_EVENT, val);
901 static void tg3_link_report(struct tg3 *tp)
903 if (!netif_carrier_ok(tp->dev)) {
904 if (netif_msg_link(tp))
905 printk(KERN_INFO PFX "%s: Link is down.\n",
907 tg3_ump_link_report(tp);
908 } else if (netif_msg_link(tp)) {
909 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
911 (tp->link_config.active_speed == SPEED_1000 ?
913 (tp->link_config.active_speed == SPEED_100 ?
915 (tp->link_config.active_duplex == DUPLEX_FULL ?
919 "%s: Flow control is %s for TX and %s for RX.\n",
921 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
923 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
925 tg3_ump_link_report(tp);
929 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
933 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
934 miireg = ADVERTISE_PAUSE_CAP;
935 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
936 miireg = ADVERTISE_PAUSE_ASYM;
937 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
938 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
945 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
949 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
950 miireg = ADVERTISE_1000XPAUSE;
951 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
952 miireg = ADVERTISE_1000XPSE_ASYM;
953 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
954 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
961 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
965 if (lcladv & ADVERTISE_PAUSE_CAP) {
966 if (lcladv & ADVERTISE_PAUSE_ASYM) {
967 if (rmtadv & LPA_PAUSE_CAP)
968 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
969 else if (rmtadv & LPA_PAUSE_ASYM)
970 cap = TG3_FLOW_CTRL_RX;
972 if (rmtadv & LPA_PAUSE_CAP)
973 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
975 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
976 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
977 cap = TG3_FLOW_CTRL_TX;
983 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
987 if (lcladv & ADVERTISE_1000XPAUSE) {
988 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
989 if (rmtadv & LPA_1000XPAUSE)
990 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
991 else if (rmtadv & LPA_1000XPAUSE_ASYM)
992 cap = TG3_FLOW_CTRL_RX;
994 if (rmtadv & LPA_1000XPAUSE)
995 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
997 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
998 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
999 cap = TG3_FLOW_CTRL_TX;
1005 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1008 u32 old_rx_mode = tp->rx_mode;
1009 u32 old_tx_mode = tp->tx_mode;
1011 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1012 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1013 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1014 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1016 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1018 flowctrl = tp->link_config.flowctrl;
1020 tp->link_config.active_flowctrl = flowctrl;
1022 if (flowctrl & TG3_FLOW_CTRL_RX)
1023 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1025 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1027 if (old_rx_mode != tp->rx_mode)
1028 tw32_f(MAC_RX_MODE, tp->rx_mode);
1030 if (flowctrl & TG3_FLOW_CTRL_TX)
1031 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1033 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1035 if (old_tx_mode != tp->tx_mode)
1036 tw32_f(MAC_TX_MODE, tp->tx_mode);
1039 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1041 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1042 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1045 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1049 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1050 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1056 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1057 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1058 ephy | MII_TG3_EPHY_SHADOW_EN);
1059 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1061 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1063 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1064 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1066 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1069 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1070 MII_TG3_AUXCTL_SHDWSEL_MISC;
1071 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1072 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1074 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1076 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1077 phy |= MII_TG3_AUXCTL_MISC_WREN;
1078 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1083 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1087 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1090 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1091 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1092 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1093 (val | (1 << 15) | (1 << 4)));
1096 static void tg3_phy_apply_otp(struct tg3 *tp)
1105 /* Enable SM_DSP clock and tx 6dB coding. */
1106 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1107 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1108 MII_TG3_AUXCTL_ACTL_TX_6DB;
1109 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1111 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1112 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1113 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1115 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1116 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1117 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1119 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1120 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1121 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1123 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1124 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1126 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1127 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1129 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1130 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1131 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1133 /* Turn off SM_DSP clock. */
1134 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1135 MII_TG3_AUXCTL_ACTL_TX_6DB;
1136 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1139 static int tg3_wait_macro_done(struct tg3 *tp)
1146 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1147 if ((tmp32 & 0x1000) == 0)
1157 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1159 static const u32 test_pat[4][6] = {
1160 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1161 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1162 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1163 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1167 for (chan = 0; chan < 4; chan++) {
1170 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1171 (chan * 0x2000) | 0x0200);
1172 tg3_writephy(tp, 0x16, 0x0002);
1174 for (i = 0; i < 6; i++)
1175 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1178 tg3_writephy(tp, 0x16, 0x0202);
1179 if (tg3_wait_macro_done(tp)) {
1184 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1185 (chan * 0x2000) | 0x0200);
1186 tg3_writephy(tp, 0x16, 0x0082);
1187 if (tg3_wait_macro_done(tp)) {
1192 tg3_writephy(tp, 0x16, 0x0802);
1193 if (tg3_wait_macro_done(tp)) {
1198 for (i = 0; i < 6; i += 2) {
1201 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1202 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1203 tg3_wait_macro_done(tp)) {
1209 if (low != test_pat[chan][i] ||
1210 high != test_pat[chan][i+1]) {
1211 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1212 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1213 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1223 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1227 for (chan = 0; chan < 4; chan++) {
1230 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1231 (chan * 0x2000) | 0x0200);
1232 tg3_writephy(tp, 0x16, 0x0002);
1233 for (i = 0; i < 6; i++)
1234 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1235 tg3_writephy(tp, 0x16, 0x0202);
1236 if (tg3_wait_macro_done(tp))
/* PHY reset workaround for 5703/5704/5705: force 1000-full master mode,
 * exercise the DSP test pattern (retrying with a BMCR reset on failure),
 * zero the channel coefficients, then restore the original register state.
 *
 * Fix: "&reg32" had been mangled into the HTML entity "(R)32" (U+00AE) at
 * the two tg3_readphy(MII_TG3_EXT_CTRL, ...) call sites; restored to take
 * the address of the local reg32 as the read destination.
 */
1243 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1245 u32 reg32, phy9_orig;
1246 int retries, do_phy_reset, err;
1252 err = tg3_bmcr_reset(tp);
1258 /* Disable transmitter and interrupt. */
1259 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1263 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1265 /* Set full-duplex, 1000 mbps. */
1266 tg3_writephy(tp, MII_BMCR,
1267 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1269 /* Set to master mode. */
1270 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1273 tg3_writephy(tp, MII_TG3_CTRL,
1274 (MII_TG3_CTRL_AS_MASTER |
1275 MII_TG3_CTRL_ENABLE_AS_MASTER));
1277 /* Enable SM_DSP_CLOCK and 6dB. */
1278 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1280 /* Block the PHY control access. */
1281 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1282 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1284 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1287 } while (--retries);
1289 err = tg3_phy_reset_chanpat(tp);
1293 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1294 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1296 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1297 tg3_writephy(tp, 0x16, 0x0000);
1299 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1300 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1301 /* Set Extended packet length bit for jumbo frames */
1302 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1305 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
/* Restore the caller's original 1000BASE-T control register value. */
1308 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1310 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1312 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1319 /* This will reset the tigon3 PHY if there is no valid
1320 * link unless the FORCE argument is non-zero.
/* NOTE(review): this listing is partial - some original lines are elided. */
1322 static int tg3_phy_reset(struct tg3 *tp)
/* 5906 (EPHY): make sure the PHY is out of IDDQ power-down first. */
1328 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1331 val = tr32(GRC_MISC_CFG);
1332 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* BMSR is latched-low; read twice to get the current link state. */
1335 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1336 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1340 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1341 netif_carrier_off(tp->dev);
1342 tg3_link_report(tp);
/* 5703/5704/5705 use the DSP-verified reset sequence. */
1345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1348 err = tg3_phy_reset_5703_4_5(tp);
/* 5784 (non-AX): temporarily clear the 10MB-RX-only CPMU bit around
 * the BMCR reset, then restore it. */
1355 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1356 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1357 cpmuctrl = tr32(TG3_CPMU_CTRL);
1358 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1360 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1363 err = tg3_bmcr_reset(tp);
1367 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1370 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1371 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1373 tw32(TG3_CPMU_CTRL, cpmuctrl);
/* 5761/5784 AX: undo the 12.5MHz MAC clock override, if active. */
1376 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1379 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1380 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1381 CPMU_LSPD_1000MB_MACCLK_12_5) {
1382 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1384 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1387 /* Disable GPHY autopowerdown. */
1388 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1389 MII_TG3_MISC_SHDW_WREN |
1390 MII_TG3_MISC_SHDW_APD_SEL |
1391 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1394 tg3_phy_apply_otp(tp);
/* Per-PHY erratum workarounds: each opens SM_DSP clock (0x0c00),
 * patches DSP locations, then closes it again (0x0400). */
1397 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1398 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1399 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1400 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1401 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1402 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1403 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1405 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1406 tg3_writephy(tp, 0x1c, 0x8d68);
1407 tg3_writephy(tp, 0x1c, 0x8d68);
1409 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1410 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1411 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1412 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1413 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1414 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1415 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1416 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1417 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1419 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1420 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1421 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1422 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1423 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1424 tg3_writephy(tp, MII_TG3_TEST1,
1425 MII_TG3_TEST1_TRIM_EN | 0x4);
1427 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1428 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1430 /* Set Extended packet length bit (bit 14) on all chips that */
1431 /* support jumbo frames */
1432 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1433 /* Cannot do read-modify-write on 5401 */
1434 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1435 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1438 /* Set bit 14 with read-modify-write to preserve other bits */
1439 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1440 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1441 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1444 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1445 * jumbo frames transmission.
1447 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1450 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1451 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1452 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1456 /* adjust output voltage */
1457 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1460 tg3_phy_toggle_automdix(tp, 1);
1461 tg3_phy_set_wirespeed(tp);
/* Manage auxiliary (Vaux) power via GRC local-control GPIOs.  On dual-port
 * chips (5704/5714) the decision also considers the peer device's WOL/ASF
 * flags, since both ports share the aux power rail.
 * NOTE(review): this listing is partial - some original lines are elided. */
1465 static void tg3_frob_aux_power(struct tg3 *tp)
1467 struct tg3 *tp_peer = tp;
/* Only real NICs (not LOM/management setups) drive these GPIOs. */
1469 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1472 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1473 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1474 struct net_device *dev_peer;
1476 dev_peer = pci_get_drvdata(tp->pdev_peer);
1477 /* remove_one() may have been run on the peer. */
1481 tp_peer = netdev_priv(dev_peer);
/* Aux power must stay up if either port needs WOL or ASF. */
1484 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1485 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1486 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1487 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1490 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1491 (GRC_LCLCTRL_GPIO_OE0 |
1492 GRC_LCLCTRL_GPIO_OE1 |
1493 GRC_LCLCTRL_GPIO_OE2 |
1494 GRC_LCLCTRL_GPIO_OUTPUT0 |
1495 GRC_LCLCTRL_GPIO_OUTPUT1),
1499 u32 grc_local_ctrl = 0;
/* If the peer already initialized, it owns the GPIO sequencing. */
1501 if (tp_peer != tp &&
1502 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1505 /* Workaround to prevent overdrawing Amps. */
1506 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1508 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1509 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1510 grc_local_ctrl, 100);
1513 /* On 5753 and variants, GPIO2 cannot be used. */
1514 no_gpio2 = tp->nic_sram_data_cfg &
1515 NIC_SRAM_DATA_CFG_NO_GPIO2;
1517 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1518 GRC_LCLCTRL_GPIO_OE1 |
1519 GRC_LCLCTRL_GPIO_OE2 |
1520 GRC_LCLCTRL_GPIO_OUTPUT1 |
1521 GRC_LCLCTRL_GPIO_OUTPUT2;
1523 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1524 GRC_LCLCTRL_GPIO_OUTPUT2);
1526 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1527 grc_local_ctrl, 100);
/* Pulse GPIO0/GPIO2 in sequence to switch to the aux supply. */
1529 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1531 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1532 grc_local_ctrl, 100);
1535 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1536 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1537 grc_local_ctrl, 100);
/* No WOL/ASF on either port: power aux down (GPIO1 pulse). */
1541 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1542 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1543 if (tp_peer != tp &&
1544 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1547 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1548 (GRC_LCLCTRL_GPIO_OE1 |
1549 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1551 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1552 GRC_LCLCTRL_GPIO_OE1, 100);
1554 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1555 (GRC_LCLCTRL_GPIO_OE1 |
1556 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given link
 * speed on 5700-class hardware; depends on LED mode and PHY type.
 * (Return statements fall in lines elided from this listing.) */
1561 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1563 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
/* BCM5411 wants polarity set for everything except 10 Mbps. */
1565 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1566 if (speed != SPEED_10)
1568 } else if (speed == SPEED_10)
1574 static int tg3_setup_phy(struct tg3 *, int);
1576 #define RESET_KIND_SHUTDOWN 0
1577 #define RESET_KIND_INIT 1
1578 #define RESET_KIND_SUSPEND 2
1580 static void tg3_write_sig_post_reset(struct tg3 *, int);
1581 static int tg3_halt_cpu(struct tg3 *, u32);
1582 static int tg3_nvram_lock(struct tg3 *);
1583 static void tg3_nvram_unlock(struct tg3 *);
/* Power down the PHY (or serdes) in preparation for a low-power state.
 * Several chip families must NOT have BMCR_PDOWN written; those bail out
 * before the final write (early returns are in elided lines). */
1585 static void tg3_power_down_phy(struct tg3 *tp)
1589 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
/* 5704 fiber: force the SERDES into reset via SG_DIG and serdes cfg. */
1590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1591 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1592 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1595 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1596 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1597 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
/* 5906 EPHY: put the PHY into IDDQ low-power mode instead. */
1602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1604 val = tr32(GRC_MISC_CFG);
1605 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1608 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1609 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1610 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1611 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1614 /* The PHY should not be powered down on some chips because
/* ...of bus-access requirements (rest of comment elided). */
1617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1619 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1620 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
/* 5761/5784 AX: slow the 1000MB MAC clock to 12.5MHz before powering down. */
1623 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1624 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1625 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1626 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1627 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1630 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Transition the device to the requested PCI power state, arranging WOL
 * (magic-packet), clock gating, PHY power-down and aux power as needed.
 * NOTE(review): this listing is partial - some original lines are elided. */
1633 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1636 u16 power_control, power_caps;
1637 int pm = tp->pm_cap;
1639 /* Make sure register accesses (indirect or otherwise)
1640 * will function correctly.
1642 pci_write_config_dword(tp->pdev,
1643 TG3PCI_MISC_HOST_CTRL,
1644 tp->misc_host_ctrl);
1646 pci_read_config_word(tp->pdev,
/* Clear pending PME status, then clear the power-state field. */
1649 power_control |= PCI_PM_CTRL_PME_STATUS;
1650 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1654 pci_write_config_word(tp->pdev,
1657 udelay(100); /* Delay after power state change */
1659 /* Switch out of Vaux if it is a NIC */
1660 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1661 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1678 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1680 tp->dev->name, state);
1684 power_control |= PCI_PM_CTRL_PME_ENABLE;
/* Mask PCI interrupts while the chip is being put to sleep. */
1686 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1687 tw32(TG3PCI_MISC_HOST_CTRL,
1688 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1690 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
1691 tp->link_config.phy_is_low_power = 1;
/* Save the current link config so resume can restore it. */
1693 if (tp->link_config.phy_is_low_power == 0) {
1694 tp->link_config.phy_is_low_power = 1;
1695 tp->link_config.orig_speed = tp->link_config.speed;
1696 tp->link_config.orig_duplex = tp->link_config.duplex;
1697 tp->link_config.orig_autoneg = tp->link_config.autoneg;
/* Copper: renegotiate down to 10/half for minimal power. */
1700 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1701 tp->link_config.speed = SPEED_10;
1702 tp->link_config.duplex = DUPLEX_HALF;
1703 tp->link_config.autoneg = AUTONEG_ENABLE;
1704 tg3_setup_phy(tp, 0);
1708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1711 val = tr32(GRC_VCPU_EXT_CTRL);
1712 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1713 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
/* Wait (up to 200 polls) for firmware to signal readiness. */
1717 for (i = 0; i < 200; i++) {
1718 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1719 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1724 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1725 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1726 WOL_DRV_STATE_SHUTDOWN |
1730 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
/* WOL enabled: configure the MAC so magic packets can wake us. */
1732 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1735 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1736 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1737 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1741 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1742 mac_mode = MAC_MODE_PORT_MODE_GMII;
1744 mac_mode = MAC_MODE_PORT_MODE_MII;
1746 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1747 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1749 u32 speed = (tp->tg3_flags &
1750 TG3_FLAG_WOL_SPEED_100MB) ?
1751 SPEED_100 : SPEED_10;
1752 if (tg3_5700_link_polarity(tp, speed))
1753 mac_mode |= MAC_MODE_LINK_POLARITY;
1755 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1758 mac_mode = MAC_MODE_PORT_MODE_TBI;
1761 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1762 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* Magic-packet wake requires PME-from-D3cold capability. */
1764 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1765 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1766 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1768 tw32_f(MAC_MODE, mac_mode);
1771 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: chip-family dependent (sections below). */
1775 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1776 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1780 base_val = tp->pci_clock_ctrl;
1781 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1782 CLOCK_CTRL_TXCLK_DISABLE);
1784 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1785 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1786 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1787 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1788 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1790 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1791 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1792 u32 newbits1, newbits2;
1794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1796 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1797 CLOCK_CTRL_TXCLK_DISABLE |
1799 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1800 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1801 newbits1 = CLOCK_CTRL_625_CORE;
1802 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1804 newbits1 = CLOCK_CTRL_ALTCLK;
1805 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
/* Apply clock changes in two steps, 40us settle each. */
1808 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1811 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1814 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1817 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1818 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1819 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1820 CLOCK_CTRL_TXCLK_DISABLE |
1821 CLOCK_CTRL_44MHZ_CORE);
1823 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1826 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1827 tp->pci_clock_ctrl | newbits3, 40);
/* Nothing needs the PHY awake: fully power it down. */
1831 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1832 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1833 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1834 tg3_power_down_phy(tp);
1836 tg3_frob_aux_power(tp);
1838 /* Workaround for unstable PLL clock */
1839 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1840 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1841 u32 val = tr32(0x7d00);
1843 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1845 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1848 err = tg3_nvram_lock(tp);
1849 tg3_halt_cpu(tp, RX_CPU_BASE);
1851 tg3_nvram_unlock(tp);
1855 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1857 /* Finally, set the new power state. */
1858 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1859 udelay(100); /* Delay after power state change */
/* Decode the PHY AUX status register's speed/duplex field into *speed and
 * *duplex.  Unknown encodings fall through to the default: on 5906 the
 * separate 100/FULL status bits are used, otherwise SPEED_INVALID. */
1864 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1866 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1867 case MII_TG3_AUX_STAT_10HALF:
1869 *duplex = DUPLEX_HALF;
1872 case MII_TG3_AUX_STAT_10FULL:
1874 *duplex = DUPLEX_FULL;
1877 case MII_TG3_AUX_STAT_100HALF:
1879 *duplex = DUPLEX_HALF;
1882 case MII_TG3_AUX_STAT_100FULL:
1884 *duplex = DUPLEX_FULL;
1887 case MII_TG3_AUX_STAT_1000HALF:
1888 *speed = SPEED_1000;
1889 *duplex = DUPLEX_HALF;
1892 case MII_TG3_AUX_STAT_1000FULL:
1893 *speed = SPEED_1000;
1894 *duplex = DUPLEX_FULL;
/* Default/unknown encoding handled below. */
1898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1899 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1901 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1905 *speed = SPEED_INVALID;
1906 *duplex = DUPLEX_INVALID;
/* Program the copper PHY's advertisement registers (and BMCR, for forced
 * modes) according to tp->link_config, then start (or skip) autoneg.
 * NOTE(review): this listing is partial - some original lines are elided. */
1911 static void tg3_phy_copper_begin(struct tg3 *tp)
1916 if (tp->link_config.phy_is_low_power) {
1917 /* Entering low power mode. Disable gigabit and
1918 * 100baseT advertisements.
1920 tg3_writephy(tp, MII_TG3_CTRL, 0);
1922 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1923 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1924 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1925 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1927 tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* No forced speed configured: advertise everything requested. */
1928 } else if (tp->link_config.speed == SPEED_INVALID) {
1929 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1930 tp->link_config.advertising &=
1931 ~(ADVERTISED_1000baseT_Half |
1932 ADVERTISED_1000baseT_Full);
1934 new_adv = ADVERTISE_CSMA;
1935 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1936 new_adv |= ADVERTISE_10HALF;
1937 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1938 new_adv |= ADVERTISE_10FULL;
1939 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1940 new_adv |= ADVERTISE_100HALF;
1941 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1942 new_adv |= ADVERTISE_100FULL;
1944 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1946 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1948 if (tp->link_config.advertising &
1949 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1951 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1952 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1953 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1954 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 erratum: force master mode at gigabit. */
1955 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1956 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1957 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1958 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1959 MII_TG3_CTRL_ENABLE_AS_MASTER);
1960 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1962 tg3_writephy(tp, MII_TG3_CTRL, 0);
/* Forced-speed path: advertise only the requested mode. */
1965 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1966 new_adv |= ADVERTISE_CSMA;
1968 /* Asking for a specific link mode. */
1969 if (tp->link_config.speed == SPEED_1000) {
1970 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1972 if (tp->link_config.duplex == DUPLEX_FULL)
1973 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1975 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1976 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1977 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1978 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1979 MII_TG3_CTRL_ENABLE_AS_MASTER);
1981 if (tp->link_config.speed == SPEED_100) {
1982 if (tp->link_config.duplex == DUPLEX_FULL)
1983 new_adv |= ADVERTISE_100FULL;
1985 new_adv |= ADVERTISE_100HALF;
1987 if (tp->link_config.duplex == DUPLEX_FULL)
1988 new_adv |= ADVERTISE_10FULL;
1990 new_adv |= ADVERTISE_10HALF;
1992 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1997 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
/* Autoneg disabled with a forced speed: program BMCR directly, using
 * loopback + link-drop polling to force renegotiation-free switch. */
2000 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2001 tp->link_config.speed != SPEED_INVALID) {
2002 u32 bmcr, orig_bmcr;
2004 tp->link_config.active_speed = tp->link_config.speed;
2005 tp->link_config.active_duplex = tp->link_config.duplex;
2008 switch (tp->link_config.speed) {
2014 bmcr |= BMCR_SPEED100;
2018 bmcr |= TG3_BMCR_SPEED1000;
2022 if (tp->link_config.duplex == DUPLEX_FULL)
2023 bmcr |= BMCR_FULLDPLX;
2025 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2026 (bmcr != orig_bmcr)) {
2027 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
/* Wait (bounded) for the link to drop before applying the
 * new forced BMCR value. */
2028 for (i = 0; i < 1500; i++) {
2032 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2033 tg3_readphy(tp, MII_BMSR, &tmp))
2035 if (!(tmp & BMSR_LSTATUS)) {
2040 tg3_writephy(tp, MII_BMCR, bmcr);
/* Autoneg path: kick off (re)negotiation. */
2044 tg3_writephy(tp, MII_BMCR,
2045 BMCR_ANENABLE | BMCR_ANRESTART);
/* BCM5401 DSP init: disable tap power management and set the extended
 * packet length bit, via a fixed sequence of DSP address/data writes.
 * Returns the OR of all write statuses (non-zero on any failure). */
2049 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2053 /* Turn off tap power management. */
2054 /* Set Extended packet length bit */
2055 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2057 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2058 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2060 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2061 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2063 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2064 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2066 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2067 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2069 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2070 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* Check whether the PHY's advertisement registers currently advertise
 * every mode requested in 'mask' (10/100 via MII_ADVERTISE, gigabit via
 * MII_TG3_CTRL unless the chip is 10/100-only).  Returns 0 on any
 * missing bit or read failure (success return is in elided lines). */
2077 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2079 u32 adv_reg, all_mask = 0;
/* Translate ethtool ADVERTISED_* bits into MII ADVERTISE_* bits. */
2081 if (mask & ADVERTISED_10baseT_Half)
2082 all_mask |= ADVERTISE_10HALF;
2083 if (mask & ADVERTISED_10baseT_Full)
2084 all_mask |= ADVERTISE_10FULL;
2085 if (mask & ADVERTISED_100baseT_Half)
2086 all_mask |= ADVERTISE_100HALF;
2087 if (mask & ADVERTISED_100baseT_Full)
2088 all_mask |= ADVERTISE_100FULL;
2090 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2093 if ((adv_reg & all_mask) != all_mask)
2095 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2099 if (mask & ADVERTISED_1000baseT_Half)
2100 all_mask |= ADVERTISE_1000HALF;
2101 if (mask & ADVERTISED_1000baseT_Full)
2102 all_mask |= ADVERTISE_1000FULL;
2104 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2107 if ((tg3_ctrl & all_mask) != all_mask)
/* Verify the advertised pause bits match the requested flow control.
 * On a full-duplex link with a mismatch, reprogram MII_ADVERTISE for the
 * next negotiation and report failure; otherwise fetch the link partner's
 * pause bits into *rmtadv when pause autoneg is on. */
2113 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2117 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
/* Compare currently-advertised pause bits against what we want. */
2120 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2121 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2123 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2124 if (curadv != reqadv)
2127 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2128 tg3_readphy(tp, MII_LPA, rmtadv);
2130 /* Reprogram the advertisement register, even if it
2131 * does not affect the current link. If the link
2132 * gets renegotiated in the future, we can save an
2133 * additional renegotiation cycle by advertising
2134 * it correctly in the first place.
2136 if (curadv != reqadv) {
2137 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2138 ADVERTISE_PAUSE_ASYM);
2139 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
/* Bring up / re-evaluate the copper PHY link: apply chip-specific PHY
 * errata workarounds, read negotiated speed/duplex, configure MAC mode to
 * match, and propagate carrier state to the net device.
 * NOTE(review): this listing is partial - some original lines are elided. */
2146 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2148 int current_link_up;
2150 u32 lcl_adv, rmt_adv;
/* Clear latched MAC status change bits before touching the PHY. */
2158 (MAC_STATUS_SYNC_CHANGED |
2159 MAC_STATUS_CFG_CHANGED |
2160 MAC_STATUS_MI_COMPLETION |
2161 MAC_STATUS_LNKSTATE_CHANGED));
/* Auto-polling must be off while we do manual MDIO accesses. */
2164 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2166 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2170 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2172 /* Some third-party PHYs need to be reset on link going
/* ...down (rest of comment elided): force a reset on 5703/4/5
 * if the carrier was up but BMSR reports no link. */
2175 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2177 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2178 netif_carrier_ok(tp->dev)) {
2179 tg3_readphy(tp, MII_BMSR, &bmsr);
2180 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2181 !(bmsr & BMSR_LSTATUS))
/* BCM5401: may need DSP re-init; BMSR is latched so read twice. */
2187 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2188 tg3_readphy(tp, MII_BMSR, &bmsr);
2189 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2190 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2193 if (!(bmsr & BMSR_LSTATUS)) {
2194 err = tg3_init_5401phy_dsp(tp);
2198 tg3_readphy(tp, MII_BMSR, &bmsr);
2199 for (i = 0; i < 1000; i++) {
2201 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2202 (bmsr & BMSR_LSTATUS)) {
/* 5401 B0 at gigabit with no link: full PHY reset + DSP init. */
2208 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2209 !(bmsr & BMSR_LSTATUS) &&
2210 tp->link_config.active_speed == SPEED_1000) {
2211 err = tg3_phy_reset(tp);
2213 err = tg3_init_5401phy_dsp(tp);
2218 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2219 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2220 /* 5701 {A0,B0} CRC bug workaround */
2221 tg3_writephy(tp, 0x15, 0x0a75);
2222 tg3_writephy(tp, 0x1c, 0x8c68);
2223 tg3_writephy(tp, 0x1c, 0x8d68);
2224 tg3_writephy(tp, 0x1c, 0x8c68);
2227 /* Clear pending interrupts... */
2228 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2229 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2231 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2232 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2233 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2234 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2236 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2238 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2239 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2240 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2242 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2245 current_link_up = 0;
2246 current_speed = SPEED_INVALID;
2247 current_duplex = DUPLEX_INVALID;
2249 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2252 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2253 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2254 if (!(val & (1 << 10))) {
2256 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll (bounded) for link-up; BMSR read twice per latched bits. */
2262 for (i = 0; i < 100; i++) {
2263 tg3_readphy(tp, MII_BMSR, &bmsr);
2264 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2265 (bmsr & BMSR_LSTATUS))
2270 if (bmsr & BMSR_LSTATUS) {
/* Read negotiated speed/duplex out of AUX status (retry until
 * a valid, decodable value is returned). */
2273 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2274 for (i = 0; i < 2000; i++) {
2276 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2281 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for BMCR to read back as a sane (non-0x7fff) value. */
2286 for (i = 0; i < 200; i++) {
2287 tg3_readphy(tp, MII_BMCR, &bmcr);
2288 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2290 if (bmcr && bmcr != 0x7fff)
2298 tp->link_config.active_speed = current_speed;
2299 tp->link_config.active_duplex = current_duplex;
2301 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2302 if ((bmcr & BMCR_ANENABLE) &&
2303 tg3_copper_is_advertising_all(tp,
2304 tp->link_config.advertising)) {
2305 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2307 current_link_up = 1;
/* Forced mode: link counts as up only if it matches config. */
2310 if (!(bmcr & BMCR_ANENABLE) &&
2311 tp->link_config.speed == current_speed &&
2312 tp->link_config.duplex == current_duplex &&
2313 tp->link_config.flowctrl ==
2314 tp->link_config.active_flowctrl) {
2315 current_link_up = 1;
2319 if (current_link_up == 1 &&
2320 tp->link_config.active_duplex == DUPLEX_FULL)
2321 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
/* No link (or low-power): restart the copper bring-up sequence. */
2325 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2328 tg3_phy_copper_begin(tp);
2330 tg3_readphy(tp, MII_BMSR, &tmp);
2331 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2332 (tmp & BMSR_LSTATUS))
2333 current_link_up = 1;
/* Program MAC port mode/duplex to match the negotiated link. */
2336 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2337 if (current_link_up == 1) {
2338 if (tp->link_config.active_speed == SPEED_100 ||
2339 tp->link_config.active_speed == SPEED_10)
2340 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2342 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2344 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2346 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2347 if (tp->link_config.active_duplex == DUPLEX_HALF)
2348 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2351 if (current_link_up == 1 &&
2352 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2353 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2355 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2358 /* ??? Without this setting Netgear GA302T PHY does not
2359 * ??? send/receive packets...
2361 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2362 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2363 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2364 tw32_f(MAC_MI_MODE, tp->mi_mode);
2368 tw32_f(MAC_MODE, tp->mac_mode);
2371 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2372 /* Polled via timer. */
2373 tw32_f(MAC_EVENT, 0);
2375 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X/high-speed PCI: notify firmware. */
2379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2380 current_link_up == 1 &&
2381 tp->link_config.active_speed == SPEED_1000 &&
2382 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2383 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2386 (MAC_STATUS_SYNC_CHANGED |
2387 MAC_STATUS_CFG_CHANGED));
2390 NIC_SRAM_FIRMWARE_MBOX,
2391 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
/* Propagate a carrier change to the stack and log it. */
2394 if (current_link_up != netif_carrier_ok(tp->dev)) {
2395 if (current_link_up)
2396 netif_carrier_on(tp->dev);
2398 netif_carrier_off(tp->dev);
2399 tg3_link_report(tp);
/* State kept by the software 1000BASE-X autonegotiation state machine
 * (tg3_fiber_aneg_smachine).  Groups: ANEG_STATE_* state-machine states,
 * MR_* management flags mirroring IEEE 802.3 Clause 37 MR variables, and
 * ANEG_CFG_* bit layout of the received/transmitted config words. */
2405 struct tg3_fiber_aneginfo {
/* State-machine states (value of the 'state' field). */
2407 #define ANEG_STATE_UNKNOWN 0
2408 #define ANEG_STATE_AN_ENABLE 1
2409 #define ANEG_STATE_RESTART_INIT 2
2410 #define ANEG_STATE_RESTART 3
2411 #define ANEG_STATE_DISABLE_LINK_OK 4
2412 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2413 #define ANEG_STATE_ABILITY_DETECT 6
2414 #define ANEG_STATE_ACK_DETECT_INIT 7
2415 #define ANEG_STATE_ACK_DETECT 8
2416 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2417 #define ANEG_STATE_COMPLETE_ACK 10
2418 #define ANEG_STATE_IDLE_DETECT_INIT 11
2419 #define ANEG_STATE_IDLE_DETECT 12
2420 #define ANEG_STATE_LINK_OK 13
2421 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2422 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* 'flags' bits: control and link-partner-ability flags. */
2425 #define MR_AN_ENABLE 0x00000001
2426 #define MR_RESTART_AN 0x00000002
2427 #define MR_AN_COMPLETE 0x00000004
2428 #define MR_PAGE_RX 0x00000008
2429 #define MR_NP_LOADED 0x00000010
2430 #define MR_TOGGLE_TX 0x00000020
2431 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2432 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2433 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2434 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2435 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2436 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2437 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2438 #define MR_TOGGLE_RX 0x00002000
2439 #define MR_NP_RX 0x00004000
2441 #define MR_LINK_OK 0x80000000
/* Timestamps (in state-machine ticks) for settle-time checks. */
2443 unsigned long link_time, cur_time;
/* Last received config word and its debounce counters/matches. */
2445 u32 ability_match_cfg;
2446 int ability_match_count;
2448 char ability_match, idle_match, ack_match;
/* Tx/rx 1000BASE-X config word layout (ANEG_CFG_*). */
2450 u32 txconfig, rxconfig;
2451 #define ANEG_CFG_NP 0x00000080
2452 #define ANEG_CFG_ACK 0x00000040
2453 #define ANEG_CFG_RF2 0x00000020
2454 #define ANEG_CFG_RF1 0x00000010
2455 #define ANEG_CFG_PS2 0x00000001
2456 #define ANEG_CFG_PS1 0x00008000
2457 #define ANEG_CFG_HD 0x00004000
2458 #define ANEG_CFG_FD 0x00002000
2459 #define ANEG_CFG_INVAL 0x00001f06
2464 #define ANEG_TIMER_ENAB 2
2465 #define ANEG_FAILED -1
2467 #define ANEG_STATE_SETTLE_TIME 10000
2469 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2470 struct tg3_fiber_aneginfo *ap)
2473 unsigned long delta;
2477 if (ap->state == ANEG_STATE_UNKNOWN) {
2481 ap->ability_match_cfg = 0;
2482 ap->ability_match_count = 0;
2483 ap->ability_match = 0;
2489 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2490 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2492 if (rx_cfg_reg != ap->ability_match_cfg) {
2493 ap->ability_match_cfg = rx_cfg_reg;
2494 ap->ability_match = 0;
2495 ap->ability_match_count = 0;
2497 if (++ap->ability_match_count > 1) {
2498 ap->ability_match = 1;
2499 ap->ability_match_cfg = rx_cfg_reg;
2502 if (rx_cfg_reg & ANEG_CFG_ACK)
2510 ap->ability_match_cfg = 0;
2511 ap->ability_match_count = 0;
2512 ap->ability_match = 0;
2518 ap->rxconfig = rx_cfg_reg;
2522 case ANEG_STATE_UNKNOWN:
2523 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2524 ap->state = ANEG_STATE_AN_ENABLE;
2527 case ANEG_STATE_AN_ENABLE:
2528 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2529 if (ap->flags & MR_AN_ENABLE) {
2532 ap->ability_match_cfg = 0;
2533 ap->ability_match_count = 0;
2534 ap->ability_match = 0;
2538 ap->state = ANEG_STATE_RESTART_INIT;
2540 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2544 case ANEG_STATE_RESTART_INIT:
2545 ap->link_time = ap->cur_time;
2546 ap->flags &= ~(MR_NP_LOADED);
2548 tw32(MAC_TX_AUTO_NEG, 0);
2549 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2550 tw32_f(MAC_MODE, tp->mac_mode);
2553 ret = ANEG_TIMER_ENAB;
2554 ap->state = ANEG_STATE_RESTART;
2557 case ANEG_STATE_RESTART:
2558 delta = ap->cur_time - ap->link_time;
2559 if (delta > ANEG_STATE_SETTLE_TIME) {
2560 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2562 ret = ANEG_TIMER_ENAB;
2566 case ANEG_STATE_DISABLE_LINK_OK:
2570 case ANEG_STATE_ABILITY_DETECT_INIT:
2571 ap->flags &= ~(MR_TOGGLE_TX);
2572 ap->txconfig = ANEG_CFG_FD;
2573 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2574 if (flowctrl & ADVERTISE_1000XPAUSE)
2575 ap->txconfig |= ANEG_CFG_PS1;
2576 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2577 ap->txconfig |= ANEG_CFG_PS2;
2578 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2579 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2580 tw32_f(MAC_MODE, tp->mac_mode);
2583 ap->state = ANEG_STATE_ABILITY_DETECT;
2586 case ANEG_STATE_ABILITY_DETECT:
2587 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2588 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2592 case ANEG_STATE_ACK_DETECT_INIT:
2593 ap->txconfig |= ANEG_CFG_ACK;
2594 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2595 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2596 tw32_f(MAC_MODE, tp->mac_mode);
2599 ap->state = ANEG_STATE_ACK_DETECT;
2602 case ANEG_STATE_ACK_DETECT:
2603 if (ap->ack_match != 0) {
2604 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2605 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2606 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2608 ap->state = ANEG_STATE_AN_ENABLE;
2610 } else if (ap->ability_match != 0 &&
2611 ap->rxconfig == 0) {
2612 ap->state = ANEG_STATE_AN_ENABLE;
2616 case ANEG_STATE_COMPLETE_ACK_INIT:
2617 if (ap->rxconfig & ANEG_CFG_INVAL) {
2621 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2622 MR_LP_ADV_HALF_DUPLEX |
2623 MR_LP_ADV_SYM_PAUSE |
2624 MR_LP_ADV_ASYM_PAUSE |
2625 MR_LP_ADV_REMOTE_FAULT1 |
2626 MR_LP_ADV_REMOTE_FAULT2 |
2627 MR_LP_ADV_NEXT_PAGE |
2630 if (ap->rxconfig & ANEG_CFG_FD)
2631 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2632 if (ap->rxconfig & ANEG_CFG_HD)
2633 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2634 if (ap->rxconfig & ANEG_CFG_PS1)
2635 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2636 if (ap->rxconfig & ANEG_CFG_PS2)
2637 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2638 if (ap->rxconfig & ANEG_CFG_RF1)
2639 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2640 if (ap->rxconfig & ANEG_CFG_RF2)
2641 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2642 if (ap->rxconfig & ANEG_CFG_NP)
2643 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2645 ap->link_time = ap->cur_time;
2647 ap->flags ^= (MR_TOGGLE_TX);
2648 if (ap->rxconfig & 0x0008)
2649 ap->flags |= MR_TOGGLE_RX;
2650 if (ap->rxconfig & ANEG_CFG_NP)
2651 ap->flags |= MR_NP_RX;
2652 ap->flags |= MR_PAGE_RX;
2654 ap->state = ANEG_STATE_COMPLETE_ACK;
2655 ret = ANEG_TIMER_ENAB;
2658 case ANEG_STATE_COMPLETE_ACK:
2659 if (ap->ability_match != 0 &&
2660 ap->rxconfig == 0) {
2661 ap->state = ANEG_STATE_AN_ENABLE;
2664 delta = ap->cur_time - ap->link_time;
2665 if (delta > ANEG_STATE_SETTLE_TIME) {
2666 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2667 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2669 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2670 !(ap->flags & MR_NP_RX)) {
2671 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2679 case ANEG_STATE_IDLE_DETECT_INIT:
2680 ap->link_time = ap->cur_time;
2681 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2682 tw32_f(MAC_MODE, tp->mac_mode);
2685 ap->state = ANEG_STATE_IDLE_DETECT;
2686 ret = ANEG_TIMER_ENAB;
2689 case ANEG_STATE_IDLE_DETECT:
2690 if (ap->ability_match != 0 &&
2691 ap->rxconfig == 0) {
2692 ap->state = ANEG_STATE_AN_ENABLE;
2695 delta = ap->cur_time - ap->link_time;
2696 if (delta > ANEG_STATE_SETTLE_TIME) {
2697 /* XXX another gem from the Broadcom driver :( */
2698 ap->state = ANEG_STATE_LINK_OK;
2702 case ANEG_STATE_LINK_OK:
2703 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2707 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2708 /* ??? unimplemented */
2711 case ANEG_STATE_NEXT_PAGE_WAIT:
2712 /* ??? unimplemented */
/* Run the software fiber autonegotiation to completion.
 * Drives tg3_fiber_aneg_smachine() in a bounded loop (up to 195000
 * ticks), then reports the negotiated TX config word via *txflags and
 * the MR_* result flags via *rxflags.
 * Returns nonzero on success — visible condition: ANEG_DONE with
 * AN_COMPLETE/LINK_OK/LP full-duplex flags set (return statements are
 * elided from this view).
 */
2723 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2726 struct tg3_fiber_aneginfo aninfo;
2727 int status = ANEG_FAILED;
/* Force GMII port mode and start sending config words before running
 * the state machine.
 */
2731 tw32_f(MAC_TX_AUTO_NEG, 0);
2733 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2734 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2737 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2740 memset(&aninfo, 0, sizeof(aninfo));
2741 aninfo.flags |= MR_AN_ENABLE;
2742 aninfo.state = ANEG_STATE_UNKNOWN;
2743 aninfo.cur_time = 0;
/* Bounded polling loop; exits early once the machine finishes. */
2745 while (++tick < 195000) {
2746 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2747 if (status == ANEG_DONE || status == ANEG_FAILED)
/* Negotiation over: stop transmitting config words. */
2753 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2754 tw32_f(MAC_MODE, tp->mac_mode);
2757 *txflags = aninfo.txconfig;
2758 *rxflags = aninfo.flags;
2760 if (status == ANEG_DONE &&
2761 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2762 MR_LP_ADV_FULL_DUPLEX)))
/* One-time initialization sequence for the BCM8002 SerDes PHY.
 * Writes a fixed series of vendor register values (PLL lock range,
 * auto-lock/comdet, POR pulse) via tg3_writephy(). The raw register
 * numbers/values come from the vendor; their individual meanings are
 * only what the adjacent comments state.
 * NOTE(review): udelay()/early-return lines appear elided from this view.
 */
2768 static void tg3_init_bcm8002(struct tg3 *tp)
2770 u32 mac_status = tr32(MAC_STATUS);
2773 /* Reset when initting first time or we have a link. */
2774 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2775 !(mac_status & MAC_STATUS_PCS_SYNCED))
2778 /* Set PLL lock range. */
2779 tg3_writephy(tp, 0x16, 0x8007);
2782 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2784 /* Wait for reset to complete. */
2785 /* XXX schedule_timeout() ... */
2786 for (i = 0; i < 500; i++)
2789 /* Config mode; select PMA/Ch 1 regs. */
2790 tg3_writephy(tp, 0x10, 0x8411);
2792 /* Enable auto-lock and comdet, select txclk for tx. */
2793 tg3_writephy(tp, 0x11, 0x0a10);
2795 tg3_writephy(tp, 0x18, 0x00a0);
2796 tg3_writephy(tp, 0x16, 0x41ff);
2798 /* Assert and deassert POR. */
2799 tg3_writephy(tp, 0x13, 0x0400);
2801 tg3_writephy(tp, 0x13, 0x0000);
2803 tg3_writephy(tp, 0x11, 0x0a50);
2805 tg3_writephy(tp, 0x11, 0x0a10);
2807 /* Wait for signal to stabilize */
2808 /* XXX schedule_timeout() ... */
2809 for (i = 0; i < 15000; i++)
2812 /* Deselect the channel register so we can read the PHYID
2815 tg3_writephy(tp, 0x10, 0x8011);
/* Fiber link setup using the chip's hardware SG_DIG autonegotiation.
 * Programs SG_DIG_CTRL according to tp->link_config, handles the 5704
 * serdes workaround path, reads back SG_DIG_STATUS, and configures flow
 * control from the negotiated pause bits. Falls back to parallel
 * detection (link up with PCS sync but no config words) when autoneg
 * does not complete.
 * Returns 1 if the link is up, 0 otherwise.
 * NOTE(review): view is elided — udelay()s, some braces and the
 * restart/restart_autoneg labels are not all visible.
 */
2818 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2821 u32 sg_dig_ctrl, sg_dig_status;
2822 u32 serdes_cfg, expected_sg_dig_ctrl;
2823 int workaround, port_a;
2824 int current_link_up;
2827 expected_sg_dig_ctrl = 0;
2830 current_link_up = 0;
/* The serdes_cfg workaround applies to 5704 revisions other than A0/A1. */
2832 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2833 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2835 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2838 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2839 /* preserve bits 20-23 for voltage regulator */
2840 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2843 sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced-speed path: turn hardware autoneg off if it was on. */
2845 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2846 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2848 u32 val = serdes_cfg;
2854 tw32_f(MAC_SERDES_CFG, val);
2857 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2859 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2860 tg3_setup_flow_control(tp, 0, 0);
2861 current_link_up = 1;
2866 /* Want auto-negotiation. */
2867 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2869 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2870 if (flowctrl & ADVERTISE_1000XPAUSE)
2871 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2872 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2873 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
/* SG_DIG_CTRL not yet programmed as desired: (re)start hw autoneg,
 * unless we are mid-parallel-detect with PCS sync and no config words.
 */
2875 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2876 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2877 tp->serdes_counter &&
2878 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2879 MAC_STATUS_RCVD_CFG)) ==
2880 MAC_STATUS_PCS_SYNCED)) {
2881 tp->serdes_counter--;
2882 current_link_up = 1;
2887 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2888 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
2890 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2892 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2893 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2894 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2895 MAC_STATUS_SIGNAL_DET)) {
2896 sg_dig_status = tr32(SG_DIG_STATUS);
2897 mac_status = tr32(MAC_STATUS);
/* Autoneg completed: derive local/remote pause advertisement and
 * program flow control accordingly.
 */
2899 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
2900 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2901 u32 local_adv = 0, remote_adv = 0;
2903 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2904 local_adv |= ADVERTISE_1000XPAUSE;
2905 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2906 local_adv |= ADVERTISE_1000XPSE_ASYM;
2908 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
2909 remote_adv |= LPA_1000XPAUSE;
2910 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
2911 remote_adv |= LPA_1000XPAUSE_ASYM;
2913 tg3_setup_flow_control(tp, local_adv, remote_adv);
2914 current_link_up = 1;
2915 tp->serdes_counter = 0;
2916 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2917 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
2918 if (tp->serdes_counter)
2919 tp->serdes_counter--;
2922 u32 val = serdes_cfg;
2929 tw32_f(MAC_SERDES_CFG, val);
2932 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2935 /* Link parallel detection - link is up */
2936 /* only if we have PCS_SYNC and not */
2937 /* receiving config code words */
2938 mac_status = tr32(MAC_STATUS);
2939 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2940 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2941 tg3_setup_flow_control(tp, 0, 0);
2942 current_link_up = 1;
2944 TG3_FLG2_PARALLEL_DETECT;
2945 tp->serdes_counter =
2946 SERDES_PARALLEL_DET_TIMEOUT;
2948 goto restart_autoneg;
2952 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2953 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2957 return current_link_up;
/* Fiber link setup without hardware SG_DIG autoneg: either run the
 * software autoneg state machine (fiber_autoneg()) and program flow
 * control from its result, or force a 1000FD link when autoneg is
 * disabled. Returns 1 if the link is up, 0 otherwise.
 * NOTE(review): view is elided — some braces, udelay()s and the 'out'
 * paths are not all visible.
 */
2960 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2962 int current_link_up = 0;
/* No PCS sync at all means no link; bail out early. */
2964 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2967 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2968 u32 txflags, rxflags;
2971 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2972 u32 local_adv = 0, remote_adv = 0;
/* Translate the negotiated config words into 1000BASE-X pause
 * advertisement bits for tg3_setup_flow_control().
 */
2974 if (txflags & ANEG_CFG_PS1)
2975 local_adv |= ADVERTISE_1000XPAUSE;
2976 if (txflags & ANEG_CFG_PS2)
2977 local_adv |= ADVERTISE_1000XPSE_ASYM;
2979 if (rxflags & MR_LP_ADV_SYM_PAUSE)
2980 remote_adv |= LPA_1000XPAUSE;
2981 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2982 remote_adv |= LPA_1000XPAUSE_ASYM;
2984 tg3_setup_flow_control(tp, local_adv, remote_adv);
2986 current_link_up = 1;
/* Let the sync/config-changed status bits settle (bounded retries). */
2988 for (i = 0; i < 30; i++) {
2991 (MAC_STATUS_SYNC_CHANGED |
2992 MAC_STATUS_CFG_CHANGED));
2994 if ((tr32(MAC_STATUS) &
2995 (MAC_STATUS_SYNC_CHANGED |
2996 MAC_STATUS_CFG_CHANGED)) == 0)
/* Autoneg failed but we have PCS sync and no config words:
 * treat the link as up anyway (parallel-detection style).
 */
3000 mac_status = tr32(MAC_STATUS);
3001 if (current_link_up == 0 &&
3002 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3003 !(mac_status & MAC_STATUS_RCVD_CFG))
3004 current_link_up = 1;
3006 tg3_setup_flow_control(tp, 0, 0);
3008 /* Forcing 1000FD link up. */
3009 current_link_up = 1;
3011 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3014 tw32_f(MAC_MODE, tp->mac_mode);
3019 return current_link_up;
/* Top-level link setup for TBI/fiber ports (PHY_SERDES). Saves the
 * current speed/duplex/flowctrl, puts the MAC in TBI port mode,
 * optionally initializes a BCM8002 PHY, then delegates to either
 * tg3_setup_fiber_hw_autoneg() or tg3_setup_fiber_by_hand() depending
 * on TG3_FLG2_HW_AUTONEG. Finally updates netif carrier state and
 * the link LED, and reports link changes via tg3_link_report().
 * NOTE(review): view is elided — the early-out when the link state is
 * unchanged and the final return are not fully visible.
 */
3022 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3025 u16 orig_active_speed;
3026 u8 orig_active_duplex;
3028 int current_link_up;
/* Remember pre-setup link parameters so we only report real changes. */
3031 orig_pause_cfg = tp->link_config.active_flowctrl;
3032 orig_active_speed = tp->link_config.active_speed;
3033 orig_active_duplex = tp->link_config.active_duplex;
/* Fast path: software-autoneg port already up with stable status —
 * just ack the changed bits instead of renegotiating.
 */
3035 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3036 netif_carrier_ok(tp->dev) &&
3037 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3038 mac_status = tr32(MAC_STATUS);
3039 mac_status &= (MAC_STATUS_PCS_SYNCED |
3040 MAC_STATUS_SIGNAL_DET |
3041 MAC_STATUS_CFG_CHANGED |
3042 MAC_STATUS_RCVD_CFG);
3043 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3044 MAC_STATUS_SIGNAL_DET)) {
3045 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3046 MAC_STATUS_CFG_CHANGED));
/* Program the MAC for TBI (fiber) port mode. */
3051 tw32_f(MAC_TX_AUTO_NEG, 0);
3053 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3054 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3055 tw32_f(MAC_MODE, tp->mac_mode);
3058 if (tp->phy_id == PHY_ID_BCM8002)
3059 tg3_init_bcm8002(tp);
3061 /* Enable link change event even when serdes polling. */
3062 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3065 current_link_up = 0;
3066 mac_status = tr32(MAC_STATUS);
3068 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3069 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3071 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear a pending link-change indication in the status block. */
3073 tp->hw_status->status =
3074 (SD_STATUS_UPDATED |
3075 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
/* Ack changed bits until the status settles (bounded retries). */
3077 for (i = 0; i < 100; i++) {
3078 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3079 MAC_STATUS_CFG_CHANGED));
3081 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3082 MAC_STATUS_CFG_CHANGED |
3083 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3087 mac_status = tr32(MAC_STATUS);
3088 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3089 current_link_up = 0;
3090 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3091 tp->serdes_counter == 0) {
3092 tw32_f(MAC_MODE, (tp->mac_mode |
3093 MAC_MODE_SEND_CONFIGS));
3095 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber links are always 1000/full when up; drive the LED override. */
3099 if (current_link_up == 1) {
3100 tp->link_config.active_speed = SPEED_1000;
3101 tp->link_config.active_duplex = DUPLEX_FULL;
3102 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3103 LED_CTRL_LNKLED_OVERRIDE |
3104 LED_CTRL_1000MBPS_ON));
3106 tp->link_config.active_speed = SPEED_INVALID;
3107 tp->link_config.active_duplex = DUPLEX_INVALID;
3108 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3109 LED_CTRL_LNKLED_OVERRIDE |
3110 LED_CTRL_TRAFFIC_OVERRIDE));
/* Report carrier transitions, or parameter-only changes on a
 * link that stayed up.
 */
3113 if (current_link_up != netif_carrier_ok(tp->dev)) {
3114 if (current_link_up)
3115 netif_carrier_on(tp->dev);
3117 netif_carrier_off(tp->dev);
3118 tg3_link_report(tp);
3120 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3121 if (orig_pause_cfg != now_pause_cfg ||
3122 orig_active_speed != tp->link_config.active_speed ||
3123 orig_active_duplex != tp->link_config.active_duplex)
3124 tg3_link_report(tp);
/* Link setup for fiber ports driven through an MII-style PHY interface
 * (MII_SERDES, e.g. 5714S — see the MAC_TX_STATUS link workaround
 * below). Reads BMSR/BMCR, programs 1000BASE-X advertisement for
 * autoneg or forces BMCR for fixed duplex, resolves speed/duplex and
 * flow control, and updates MAC mode and carrier state.
 * Returns an accumulated tg3_readphy/tg3_writephy error status in err.
 * NOTE(review): view is elided — some braces, udelay()s and local
 * declarations (bmcr, bmsr, adv, new_adv, ...) are not all visible.
 */
3130 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3132 int current_link_up, err = 0;
3136 u32 local_adv, remote_adv;
3138 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3139 tw32_f(MAC_MODE, tp->mac_mode);
/* Ack any pending sync/config/MI/link-state change indications. */
3145 (MAC_STATUS_SYNC_CHANGED |
3146 MAC_STATUS_CFG_CHANGED |
3147 MAC_STATUS_MI_COMPLETION |
3148 MAC_STATUS_LNKSTATE_CHANGED));
3154 current_link_up = 0;
3155 current_speed = SPEED_INVALID;
3156 current_duplex = DUPLEX_INVALID;
/* BMSR latches link-down; read twice to get the current state. */
3158 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3159 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* 5714: BMSR link bit is unreliable — use MAC_TX_STATUS instead. */
3160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3161 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3162 bmsr |= BMSR_LSTATUS;
3164 bmsr &= ~BMSR_LSTATUS;
3167 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3169 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3170 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3171 tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3172 /* do nothing, just check for link up at the end */
3173 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Rebuild the 1000BASE-X advertisement from link_config and
 * (re)start autoneg only if something actually changed.
 */
3176 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3177 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3178 ADVERTISE_1000XPAUSE |
3179 ADVERTISE_1000XPSE_ASYM |
3182 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3184 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3185 new_adv |= ADVERTISE_1000XHALF;
3186 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3187 new_adv |= ADVERTISE_1000XFULL;
3189 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3190 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3191 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3192 tg3_writephy(tp, MII_BMCR, bmcr);
3194 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3195 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3196 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/* Forced mode: build the desired BMCR without autoneg. */
3203 bmcr &= ~BMCR_SPEED1000;
3204 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3206 if (tp->link_config.duplex == DUPLEX_FULL)
3207 new_bmcr |= BMCR_FULLDPLX;
3209 if (new_bmcr != bmcr) {
3210 /* BMCR_SPEED1000 is a reserved bit that needs
3211 * to be set on write.
3213 new_bmcr |= BMCR_SPEED1000;
3215 /* Force a linkdown */
3216 if (netif_carrier_ok(tp->dev)) {
/* Drop all advertised modes so the partner sees link loss
 * before we apply the new forced configuration.
 */
3219 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3220 adv &= ~(ADVERTISE_1000XFULL |
3221 ADVERTISE_1000XHALF |
3223 tg3_writephy(tp, MII_ADVERTISE, adv);
3224 tg3_writephy(tp, MII_BMCR, bmcr |
3228 netif_carrier_off(tp->dev);
3230 tg3_writephy(tp, MII_BMCR, new_bmcr);
3232 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3233 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3234 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3236 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3237 bmsr |= BMSR_LSTATUS;
3239 bmsr &= ~BMSR_LSTATUS;
3241 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/* Resolve final speed/duplex from BMSR/BMCR and, for autoneg, the
 * intersection of local and partner advertisements.
 */
3245 if (bmsr & BMSR_LSTATUS) {
3246 current_speed = SPEED_1000;
3247 current_link_up = 1;
3248 if (bmcr & BMCR_FULLDPLX)
3249 current_duplex = DUPLEX_FULL;
3251 current_duplex = DUPLEX_HALF;
3256 if (bmcr & BMCR_ANENABLE) {
3259 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3260 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3261 common = local_adv & remote_adv;
3262 if (common & (ADVERTISE_1000XHALF |
3263 ADVERTISE_1000XFULL)) {
3264 if (common & ADVERTISE_1000XFULL)
3265 current_duplex = DUPLEX_FULL;
3267 current_duplex = DUPLEX_HALF;
3270 current_link_up = 0;
3274 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3275 tg3_setup_flow_control(tp, local_adv, remote_adv);
3277 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3278 if (tp->link_config.active_duplex == DUPLEX_HALF)
3279 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3281 tw32_f(MAC_MODE, tp->mac_mode);
3284 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3286 tp->link_config.active_speed = current_speed;
3287 tp->link_config.active_duplex = current_duplex;
/* Propagate carrier transitions to the stack and log them. */
3289 if (current_link_up != netif_carrier_ok(tp->dev)) {
3290 if (current_link_up)
3291 netif_carrier_on(tp->dev);
3293 netif_carrier_off(tp->dev);
3294 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3296 tg3_link_report(tp);
/* Periodic parallel-detection check for MII-serdes ports. If autoneg
 * has timed out (serdes_counter reached 0) and the link is still down,
 * look for signal-detect without config code words and, if found,
 * force 1000/full with autoneg off (parallel detection). Conversely,
 * if a parallel-detected link starts receiving config words again,
 * re-enable autoneg.
 * NOTE(review): uses vendor shadow/expansion PHY registers (0x1c,
 * 0x17/0x15); bit meanings are taken from the in-code comments only.
 */
3301 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3303 if (tp->serdes_counter) {
3304 /* Give autoneg time to complete. */
3305 tp->serdes_counter--;
3308 if (!netif_carrier_ok(tp->dev) &&
3309 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3312 tg3_readphy(tp, MII_BMCR, &bmcr);
3313 if (bmcr & BMCR_ANENABLE) {
3316 /* Select shadow register 0x1f */
3317 tg3_writephy(tp, 0x1c, 0x7c00);
3318 tg3_readphy(tp, 0x1c, &phy1);
3320 /* Select expansion interrupt status register */
3321 tg3_writephy(tp, 0x17, 0x0f01);
/* Expansion status is read twice; presumably the first read
 * clears stale state — TODO confirm against PHY datasheet.
 */
3322 tg3_readphy(tp, 0x15, &phy2);
3323 tg3_readphy(tp, 0x15, &phy2);
3325 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3326 /* We have signal detect and not receiving
3327 * config code words, link is up by parallel
3331 bmcr &= ~BMCR_ANENABLE;
3332 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3333 tg3_writephy(tp, MII_BMCR, bmcr);
3334 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3338 else if (netif_carrier_ok(tp->dev) &&
3339 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3340 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3343 /* Select expansion interrupt status register */
3344 tg3_writephy(tp, 0x17, 0x0f01);
3345 tg3_readphy(tp, 0x15, &phy2);
3349 /* Config code words received, turn on autoneg. */
3350 tg3_readphy(tp, MII_BMCR, &bmcr);
3351 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3353 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/* Top-level PHY/link setup dispatcher. Selects the fiber, fiber-MII,
 * or copper setup routine based on tg3_flags2, then applies post-setup
 * fixups: 5784 A0/A1 MAC clock prescaler, MAC_TX_LENGTHS IPG/slot-time
 * for 1000/half vs. everything else, stats coalescing ticks on
 * pre-5705 parts, and the ASPM L1 threshold workaround.
 * Returns the err value from the chosen setup routine.
 */
3359 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3363 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3364 err = tg3_setup_fiber_phy(tp, force_reset);
3365 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3366 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3368 err = tg3_setup_copper_phy(tp, force_reset);
/* 5784 A0/A1: pick a GRC prescaler to match the MAC clock rate.
 * (The scale assignments between these lines are elided from view.)
 */
3371 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3372 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3375 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3376 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3378 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3383 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3384 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3385 tw32(GRC_MISC_CFG, val);
/* 1000/half needs a longer slot time (0xff) than the default (32). */
3388 if (tp->link_config.active_speed == SPEED_1000 &&
3389 tp->link_config.active_duplex == DUPLEX_HALF)
3390 tw32(MAC_TX_LENGTHS,
3391 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3392 (6 << TX_LENGTHS_IPG_SHIFT) |
3393 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3395 tw32(MAC_TX_LENGTHS,
3396 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3397 (6 << TX_LENGTHS_IPG_SHIFT) |
3398 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
/* Pre-5705 chips: only coalesce statistics while the link is up. */
3400 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3401 if (netif_carrier_ok(tp->dev)) {
3402 tw32(HOSTCC_STAT_COAL_TICKS,
3403 tp->coal.stats_block_coalesce_usecs);
3405 tw32(HOSTCC_STAT_COAL_TICKS, 0);
/* ASPM workaround: adjust the PCIe L1 entry threshold by link state. */
3409 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3410 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3411 if (!netif_carrier_ok(tp->dev))
3412 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3415 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3416 tw32(PCIE_PWR_MGMT_THRESH, val);
3422 /* This is called whenever we suspect that the system chipset is re-
3423 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3424 * is bogus tx completions. We try to recover by setting the
3425 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
/* Begin recovery from suspected MMIO write reordering on the TX
 * mailbox (see the comment block above). Warns the user and sets
 * TG3_FLAG_TX_RECOVERY_PENDING under tp->lock so a later reset can
 * switch the driver to the reorder-safe mailbox write path.
 */
3428 static void tg3_tx_recover(struct tg3 *tp)
/* Sanity: recovery only makes sense if the reorder-safe path is not
 * already in use.
 */
3430 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3431 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3433 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3434 "mapped I/O cycles to the network device, attempting to "
3435 "recover. Please report the problem to the driver maintainer "
3436 "and include system chipset information.\n", tp->dev->name);
3438 spin_lock(&tp->lock);
3439 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3440 spin_unlock(&tp->lock);
/* Number of free TX descriptors: pending budget minus the count of
 * in-flight entries (prod - cons, wrapped to the power-of-two ring).
 */
3443 static inline u32 tg3_tx_avail(struct tg3 *tp)
3446 return (tp->tx_pending -
3447 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3450 /* Tigon3 never reports partial packet sends. So we do not
3451 * need special logic to handle SKBs that have not had all
3452 * of their frags sent yet, like SunGEM does.
/* TX completion: walk the software consumer index up to the hardware
 * consumer index, unmapping and freeing each completed skb (head
 * fragment plus page fragments). A NULL skb or an unexpected non-NULL
 * ri->skb mid-packet indicates the mailbox-reorder bug and triggers
 * tg3_tx_recover() (the tx_bug/recover lines are partly elided from
 * this view). Finally publishes tx_cons and wakes the queue if enough
 * space freed up.
 */
3454 static void tg3_tx(struct tg3 *tp)
3456 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3457 u32 sw_idx = tp->tx_cons;
3459 while (sw_idx != hw_idx) {
3460 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3461 struct sk_buff *skb = ri->skb;
/* Completed slot with no skb: bogus completion (see tg3_tx_recover). */
3464 if (unlikely(skb == NULL)) {
3469 pci_unmap_single(tp->pdev,
3470 pci_unmap_addr(ri, mapping),
3476 sw_idx = NEXT_TX(sw_idx);
/* Unmap each page fragment of this skb. */
3478 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3479 ri = &tp->tx_buffers[sw_idx];
3480 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3483 pci_unmap_page(tp->pdev,
3484 pci_unmap_addr(ri, mapping),
3485 skb_shinfo(skb)->frags[i].size,
3488 sw_idx = NEXT_TX(sw_idx);
3493 if (unlikely(tx_bug)) {
3499 tp->tx_cons = sw_idx;
3501 /* Need to make the tx_cons update visible to tg3_start_xmit()
3502 * before checking for netif_queue_stopped(). Without the
3503 * memory barrier, there is a small possibility that tg3_start_xmit()
3504 * will miss it and cause the queue to be stopped forever.
/* Re-check under netif_tx_lock to avoid racing a concurrent stop. */
3508 if (unlikely(netif_queue_stopped(tp->dev) &&
3509 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3510 netif_tx_lock(tp->dev);
3511 if (netif_queue_stopped(tp->dev) &&
3512 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3513 netif_wake_queue(tp->dev);
3514 netif_tx_unlock(tp->dev);
3518 /* Returns size of skb allocated or < 0 on error.
3520 * We only need to fill in the address because the other members
3521 * of the RX descriptor are invariant, see tg3_init_rings.
3523 * Note the purposeful assymetry of cpu vs. chip accesses. For
3524 * posting buffers we only dirty the first cache line of the RX
3525 * descriptor (containing the address). Whereas for the RX status
3526 * buffers the cpu only reads the last cacheline of the RX descriptor
3527 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/* Allocate and DMA-map a fresh RX skb for one slot of the standard or
 * jumbo producer ring (selected by opaque_key), writing only the DMA
 * address into the descriptor (see the invariant-fields comment above).
 * On success the previous src slot's skb pointer is cleared.
 * Returns the allocated skb size; error-return lines (<0) are elided
 * from this view.
 */
3529 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3530 int src_idx, u32 dest_idx_unmasked)
3532 struct tg3_rx_buffer_desc *desc;
3533 struct ring_info *map, *src_map;
3534 struct sk_buff *skb;
3536 int skb_size, dest_idx;
/* Pick ring-specific descriptor, bookkeeping slot and buffer size. */
3539 switch (opaque_key) {
3540 case RXD_OPAQUE_RING_STD:
3541 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3542 desc = &tp->rx_std[dest_idx];
3543 map = &tp->rx_std_buffers[dest_idx];
3545 src_map = &tp->rx_std_buffers[src_idx];
3546 skb_size = tp->rx_pkt_buf_sz;
3549 case RXD_OPAQUE_RING_JUMBO:
3550 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3551 desc = &tp->rx_jumbo[dest_idx];
3552 map = &tp->rx_jumbo_buffers[dest_idx];
3554 src_map = &tp->rx_jumbo_buffers[src_idx];
3555 skb_size = RX_JUMBO_PKT_BUF_SZ;
3562 /* Do not overwrite any of the map or rp information
3563 * until we are sure we can commit to a new buffer.
3565 * Callers depend upon this behavior and assume that
3566 * we leave everything unchanged if we fail.
3568 skb = netdev_alloc_skb(tp->dev, skb_size);
3572 skb_reserve(skb, tp->rx_offset);
3574 mapping = pci_map_single(tp->pdev, skb->data,
3575 skb_size - tp->rx_offset,
3576 PCI_DMA_FROMDEVICE);
3579 pci_unmap_addr_set(map, mapping, mapping);
3581 if (src_map != NULL)
3582 src_map->skb = NULL;
/* Publish the 64-bit DMA address into the hardware descriptor. */
3584 desc->addr_hi = ((u64)mapping >> 32);
3585 desc->addr_lo = ((u64)mapping & 0xffffffff);
3590 /* We only need to move over in the address because the other
3591 * members of the RX descriptor are invariant. See notes above
3592 * tg3_alloc_rx_skb for full details.
/* Recycle an RX buffer: move the skb, unmap cookie, and DMA address
 * from the src slot to the dest slot of the same ring (std or jumbo),
 * then clear the src slot. No allocation or remapping is done — only
 * the address fields move (see the comment above).
 */
3594 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3595 int src_idx, u32 dest_idx_unmasked)
3597 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3598 struct ring_info *src_map, *dest_map;
3601 switch (opaque_key) {
3602 case RXD_OPAQUE_RING_STD:
3603 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3604 dest_desc = &tp->rx_std[dest_idx];
3605 dest_map = &tp->rx_std_buffers[dest_idx];
3606 src_desc = &tp->rx_std[src_idx];
3607 src_map = &tp->rx_std_buffers[src_idx];
3610 case RXD_OPAQUE_RING_JUMBO:
3611 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3612 dest_desc = &tp->rx_jumbo[dest_idx];
3613 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3614 src_desc = &tp->rx_jumbo[src_idx];
3615 src_map = &tp->rx_jumbo_buffers[src_idx];
/* Transfer ownership of the buffer to the destination slot. */
3622 dest_map->skb = src_map->skb;
3623 pci_unmap_addr_set(dest_map, mapping,
3624 pci_unmap_addr(src_map, mapping));
3625 dest_desc->addr_hi = src_desc->addr_hi;
3626 dest_desc->addr_lo = src_desc->addr_lo;
3628 src_map->skb = NULL;
3631 #if TG3_VLAN_TAG_USED
/* Hand a received skb to the stack with its hardware-extracted VLAN tag
 * (compiled in only when VLAN support is configured).
 */
3632 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3634 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3638 /* The RX ring scheme is composed of multiple rings which post fresh
3639 * buffers to the chip, and one special ring the chip uses to report
3640 * status back to the host.
3642 * The special ring reports the status of received packets to the
3643 * host. The chip does not write into the original descriptor the
3644 * RX buffer was obtained from. The chip simply takes the original
3645 * descriptor as provided by the host, updates the status and length
3646 * field, then writes this into the next status ring entry.
3648 * Each ring the host uses to post buffers to the chip is described
3649 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3650 * it is first placed into the on-chip ram. When the packet's length
3651 * is known, it walks down the TG3_BDINFO entries to select the ring.
3652 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3653 * which is within the range of the new packet's length is chosen.
3655 * The "separate ring for rx status" scheme may sound queer, but it makes
3656 * sense from a cache coherency perspective. If only the host writes
3657 * to the buffer post rings, and only the chip writes to the rx status
3658 * rings, then cache lines never move beyond shared-modified state.
3659 * If both the host and chip were to write into the same ring, cache line
3660 * eviction could occur since both entities want it in an exclusive state.
/* NAPI RX processing: consume up to 'budget' entries from the RX
 * status (return) ring. For each completed packet: locate the source
 * buffer via the opaque cookie, drop errored frames (recycling the
 * buffer), then either re-allocate the ring buffer (large packet) or
 * copy into a fresh skb and recycle (small packet, RX_COPY_THRESHOLD).
 * Sets checksum/VLAN info and passes the skb up. Finally acks the
 * status ring and refills the std/jumbo producer rings.
 * Returns the number of packets received (return line elided from
 * this view).
 */
3662 static int tg3_rx(struct tg3 *tp, int budget)
3664 u32 work_mask, rx_std_posted = 0;
3665 u32 sw_idx = tp->rx_rcb_ptr;
3669 hw_idx = tp->hw_status->idx[0].rx_producer;
3671 * We need to order the read of hw_idx and the read of
3672 * the opaque cookie.
3677 while (sw_idx != hw_idx && budget > 0) {
3678 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3680 struct sk_buff *skb;
3681 dma_addr_t dma_addr;
3682 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie tells us which producer ring and slot this
 * buffer came from.
 */
3684 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3685 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3686 if (opaque_key == RXD_OPAQUE_RING_STD) {
3687 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3689 skb = tp->rx_std_buffers[desc_idx].skb;
3690 post_ptr = &tp->rx_std_ptr;
3692 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3693 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3695 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3696 post_ptr = &tp->rx_jumbo_ptr;
3699 goto next_pkt_nopost;
3702 work_mask |= opaque_key;
/* Errored frame (except the tolerated odd-nibble MII case):
 * recycle the buffer and count the drop.
 */
3704 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3705 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3707 tg3_recycle_rx(tp, opaque_key,
3708 desc_idx, *post_ptr);
3710 /* Other statistics kept track of by card. */
3711 tp->net_stats.rx_dropped++;
3715 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3717 if (len > RX_COPY_THRESHOLD
3718 && tp->rx_offset == 2
3719 /* rx_offset != 2 iff this is a 5701 card running
3720 * in PCI-X mode [see tg3_get_invariants()] */
/* Large packet: give the ring a fresh buffer and hand the
 * original (unmapped) skb straight up the stack.
 */
3724 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3725 desc_idx, *post_ptr);
3729 pci_unmap_single(tp->pdev, dma_addr,
3730 skb_size - tp->rx_offset,
3731 PCI_DMA_FROMDEVICE);
/* Small packet: copy into a fresh skb and recycle the ring
 * buffer in place (cheaper than a remap for short frames).
 */
3735 struct sk_buff *copy_skb;
3737 tg3_recycle_rx(tp, opaque_key,
3738 desc_idx, *post_ptr);
3740 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3741 if (copy_skb == NULL)
3742 goto drop_it_no_recycle;
3744 skb_reserve(copy_skb, 2);
3745 skb_put(copy_skb, len);
3746 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3747 skb_copy_from_linear_data(skb, copy_skb->data, len);
3748 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3750 /* We'll reuse the original ring buffer. */
/* Hardware checksum: only trust it when the full-csum field is
 * 0xffff and RX checksumming is enabled.
 */
3754 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3755 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3756 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3757 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3758 skb->ip_summed = CHECKSUM_UNNECESSARY;
3760 skb->ip_summed = CHECKSUM_NONE;
3762 skb->protocol = eth_type_trans(skb, tp->dev);
3763 #if TG3_VLAN_TAG_USED
3764 if (tp->vlgrp != NULL &&
3765 desc->type_flags & RXD_FLAG_VLAN) {
3766 tg3_vlan_rx(tp, skb,
3767 desc->err_vlan & RXD_VLAN_MASK);
3770 netif_receive_skb(skb);
3772 tp->dev->last_rx = jiffies;
/* Periodically tell the chip about std-ring buffers we have
 * posted, so it never runs dry within one poll.
 */
3779 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3780 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3782 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3783 TG3_64BIT_REG_LOW, idx);
3784 work_mask &= ~RXD_OPAQUE_RING_STD;
3789 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3791 /* Refresh hw_idx to see if there is new work */
3792 if (sw_idx == hw_idx) {
3793 hw_idx = tp->hw_status->idx[0].rx_producer;
3798 /* ACK the status ring. */
3799 tp->rx_rcb_ptr = sw_idx;
3800 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3802 /* Refill RX ring(s). */
3803 if (work_mask & RXD_OPAQUE_RING_STD) {
3804 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3805 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3808 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3809 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3810 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
/* One pass of NAPI work: handle a pending link-change event (unless
 * link changes are polled via register), run TX completion, and run RX
 * processing within the remaining budget.
 * Returns the updated work_done count (return line elided from view).
 */
3818 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3820 struct tg3_hw_status *sblk = tp->hw_status;
3822 /* handle link change and other phy events */
3823 if (!(tp->tg3_flags &
3824 (TG3_FLAG_USE_LINKCHG_REG |
3825 TG3_FLAG_POLL_SERDES))) {
3826 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear the link-change bit before handling it so a new event
 * is not lost.
 */
3827 sblk->status = SD_STATUS_UPDATED |
3828 (sblk->status & ~SD_STATUS_LINK_CHG);
3829 spin_lock(&tp->lock);
3830 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
3832 (MAC_STATUS_SYNC_CHANGED |
3833 MAC_STATUS_CFG_CHANGED |
3834 MAC_STATUS_MI_COMPLETION |
3835 MAC_STATUS_LNKSTATE_CHANGED));
3838 tg3_setup_phy(tp, 0);
3839 spin_unlock(&tp->lock);
3843 /* run TX completion thread */
3844 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3846 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3850 /* run RX thread, within the bounds set by NAPI.
3851 * All RX "locking" is done by ensuring outside
3852 * code synchronizes with tg3->napi.poll()
3854 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3855 work_done += tg3_rx(tp, budget - work_done);
/* NAPI poll entry point.  Repeatedly calls tg3_poll_work() until either
 * the budget is exhausted or no work remains, then completes NAPI and
 * re-enables chip interrupts via tg3_restart_ints().  A pending TX
 * recovery bails out to the reset path at the bottom.
 */
3860 static int tg3_poll(struct napi_struct *napi, int budget)
3862 struct tg3 *tp = container_of(napi, struct tg3, napi);
3864 struct tg3_hw_status *sblk = tp->hw_status;
3867 work_done = tg3_poll_work(tp, work_done, budget);
3869 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3872 if (unlikely(work_done >= budget))
3875 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3876 /* tp->last_tag is used in tg3_restart_ints() below
3877 * to tell the hw how much work has been processed,
3878 * so we must read it before checking for more work.
3880 tp->last_tag = sblk->status_tag;
3883 sblk->status &= ~SD_STATUS_UPDATED;
3885 if (likely(!tg3_has_work(tp))) {
3886 netif_rx_complete(tp->dev, napi);
3887 tg3_restart_ints(tp);
/* TX recovery path: stop polling and let the reset task rebuild the
 * hardware state.
 */
3895 /* work_done is guaranteed to be less than budget. */
3896 netif_rx_complete(tp->dev, napi);
3897 schedule_work(&tp->reset_task);
/* Mark the IRQ handler as synchronized-against and wait for any handler
 * already running on another CPU to finish.  Must not be called when
 * irq_sync is already set (BUG_ON enforces single quiesce).
 */
3901 static void tg3_irq_quiesce(struct tg3 *tp)
3903 BUG_ON(tp->irq_sync);
3908 synchronize_irq(tp->pdev->irq);
/* Returns non-zero while IRQ processing is quiesced (see
 * tg3_irq_quiesce()); handlers use this to avoid scheduling NAPI.
 */
3911 static inline int tg3_irq_sync(struct tg3 *tp)
3913 return tp->irq_sync;
3916 /* Fully shut down all tg3 driver activity elsewhere in the system.
3917 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3918 * with this as well. Most of the time, this is not necessary except
3919 * when shutting down the device.
/* Acquire the driver's main lock (BH-disabling).  When irq_sync is
 * requested, also quiesce the IRQ handler so no interrupt work races
 * with the caller.
 */
3921 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3923 spin_lock_bh(&tp->lock);
3925 tg3_irq_quiesce(tp);
/* Release the driver's main lock taken by tg3_full_lock(). */
3928 static inline void tg3_full_unlock(struct tg3 *tp)
3930 spin_unlock_bh(&tp->lock);
3933 /* One-shot MSI handler - Chip automatically disables interrupt
3934 * after sending MSI so driver doesn't have to do it.
/* One-shot MSI handler: the chip has already disabled its interrupt, so
 * we only need to kick NAPI.  Prefetches warm the status block and next
 * RX return-ring entry before the poll runs.
 */
3936 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3938 struct net_device *dev = dev_id;
3939 struct tg3 *tp = netdev_priv(dev);
3941 prefetch(tp->hw_status);
3942 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3944 if (likely(!tg3_irq_sync(tp)))
3945 netif_rx_schedule(dev, &tp->napi);
3950 /* MSI ISR - No need to check for interrupt sharing and no need to
3951 * flush status block and interrupt mailbox. PCI ordering rules
3952 * guarantee that MSI will arrive after the status block.
/* Standard MSI handler.  Writes the interrupt mailbox to ack and mask
 * further IRQs, then schedules NAPI unless the driver is quiescing.
 * Always reports the IRQ as handled (MSI is never shared).
 */
3954 static irqreturn_t tg3_msi(int irq, void *dev_id)
3956 struct net_device *dev = dev_id;
3957 struct tg3 *tp = netdev_priv(dev);
3959 prefetch(tp->hw_status);
3960 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3962 * Writing any value to intr-mbox-0 clears PCI INTA# and
3963 * chip-internal interrupt pending events.
3964 * Writing non-zero to intr-mbox-0 additional tells the
3965 * NIC to stop sending us irqs, engaging "in-intr-handler"
3968 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3969 if (likely(!tg3_irq_sync(tp)))
3970 netif_rx_schedule(dev, &tp->napi);
3972 return IRQ_RETVAL(1);
/* Legacy (INTx) interrupt handler for non-tagged-status chips.  Checks
 * PCISTATE to distinguish our interrupt from a shared one, acks the
 * mailbox with a flushing write, and schedules NAPI if there is work.
 * If no work is found the interrupt mailbox is re-armed (value elided
 * in this view).
 */
3975 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3977 struct net_device *dev = dev_id;
3978 struct tg3 *tp = netdev_priv(dev);
3979 struct tg3_hw_status *sblk = tp->hw_status;
3980 unsigned int handled = 1;
3982 /* In INTx mode, it is possible for the interrupt to arrive at
3983 * the CPU before the status block posted prior to the interrupt.
3984 * Reading the PCI State register will confirm whether the
3985 * interrupt is ours and will flush the status block.
3987 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3988 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3989 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3996 * Writing any value to intr-mbox-0 clears PCI INTA# and
3997 * chip-internal interrupt pending events.
3998 * Writing non-zero to intr-mbox-0 additional tells the
3999 * NIC to stop sending us irqs, engaging "in-intr-handler"
4002 * Flush the mailbox to de-assert the IRQ immediately to prevent
4003 * spurious interrupts. The flush impacts performance but
4004 * excessive spurious interrupts can be worse in some cases.
4006 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4007 if (tg3_irq_sync(tp))
4009 sblk->status &= ~SD_STATUS_UPDATED;
4010 if (likely(tg3_has_work(tp))) {
4011 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4012 netif_rx_schedule(dev, &tp->napi);
4014 /* No work, shared interrupt perhaps? re-enable
4015 * interrupts, and flush that PCI write
4017 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4021 return IRQ_RETVAL(handled);
/* Legacy (INTx) interrupt handler for tagged-status chips.  A repeated
 * status_tag means the status block has not advanced, so the interrupt
 * may belong to a sharer; PCISTATE confirms.  Otherwise ack the mailbox
 * and hand off to NAPI, updating last_tag only if we won the schedule
 * race.
 */
4024 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4026 struct net_device *dev = dev_id;
4027 struct tg3 *tp = netdev_priv(dev);
4028 struct tg3_hw_status *sblk = tp->hw_status;
4029 unsigned int handled = 1;
4031 /* In INTx mode, it is possible for the interrupt to arrive at
4032 * the CPU before the status block posted prior to the interrupt.
4033 * Reading the PCI State register will confirm whether the
4034 * interrupt is ours and will flush the status block.
4036 if (unlikely(sblk->status_tag == tp->last_tag)) {
4037 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4038 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4045 * writing any value to intr-mbox-0 clears PCI INTA# and
4046 * chip-internal interrupt pending events.
4047 * writing non-zero to intr-mbox-0 additional tells the
4048 * NIC to stop sending us irqs, engaging "in-intr-handler"
4051 * Flush the mailbox to de-assert the IRQ immediately to prevent
4052 * spurious interrupts. The flush impacts performance but
4053 * excessive spurious interrupts can be worse in some cases.
4055 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4056 if (tg3_irq_sync(tp))
4058 if (netif_rx_schedule_prep(dev, &tp->napi)) {
4059 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4060 /* Update last_tag to mark that this status has been
4061 * seen. Because interrupt may be shared, we may be
4062 * racing with tg3_poll(), so only update last_tag
4063 * if tg3_poll() is not scheduled.
4065 tp->last_tag = sblk->status_tag;
4066 __netif_rx_schedule(dev, &tp->napi);
4069 return IRQ_RETVAL(handled);
4072 /* ISR for interrupt test */
/* Minimal ISR used only by the interrupt self-test: if the status block
 * updated or INTA is asserted, disable interrupts and report handled;
 * otherwise report not-handled.
 */
4073 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4075 struct net_device *dev = dev_id;
4076 struct tg3 *tp = netdev_priv(dev);
4077 struct tg3_hw_status *sblk = tp->hw_status;
4079 if ((sblk->status & SD_STATUS_UPDATED) ||
4080 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4081 tg3_disable_ints(tp);
4082 return IRQ_RETVAL(1);
4084 return IRQ_RETVAL(0);
4087 static int tg3_init_hw(struct tg3 *, int);
4088 static int tg3_halt(struct tg3 *, int, int);
4090 /* Restart hardware after configuration changes, self-test, etc.
4091 * Invoked with tp->lock held.
/* Re-initialize the hardware after a configuration change or self-test.
 * Called with tp->lock held; on init failure the lock is dropped and
 * re-acquired around the teardown (hence the sparse annotations).
 * Returns the tg3_init_hw() result.
 */
4093 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4094 __releases(tp->lock)
4095 __acquires(tp->lock)
4099 err = tg3_init_hw(tp, reset_phy);
/* Failure path: halt the chip, drop the lock to safely kill the timer,
 * re-enable NAPI, then retake the lock for the caller.
 */
4101 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4102 "aborting.\n", tp->dev->name);
4103 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4104 tg3_full_unlock(tp);
4105 del_timer_sync(&tp->timer);
4107 napi_enable(&tp->napi);
4109 tg3_full_lock(tp, 0);
4114 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: synthesize an interrupt so netconsole/kgdboe can make
 * progress with normal interrupts disabled.
 */
4115 static void tg3_poll_controller(struct net_device *dev)
4117 struct tg3 *tp = netdev_priv(dev);
4119 tg3_interrupt(tp->pdev->irq, dev);
/* Deferred reset worker (scheduled from tx_timeout / poll error paths).
 * Halts and re-initializes the chip under the full lock, undoing the
 * write-reorder workaround if a TX recovery was pending, then restarts
 * the netif machinery and (optionally) the driver timer.
 */
4123 static void tg3_reset_task(struct work_struct *work)
4125 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4126 unsigned int restart_timer;
4128 tg3_full_lock(tp, 0);
4130 if (!netif_running(tp->dev)) {
4131 tg3_full_unlock(tp);
4135 tg3_full_unlock(tp);
/* Retake the lock with IRQ quiesce before touching the hardware. */
4139 tg3_full_lock(tp, 1);
4141 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4142 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4144 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4145 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4146 tp->write32_rx_mbox = tg3_write_flush_reg32;
4147 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4148 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4151 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4152 if (tg3_init_hw(tp, 1))
4155 tg3_netif_start(tp);
4158 mod_timer(&tp->timer, jiffies + 1);
4161 tg3_full_unlock(tp);
/* Dump a few MAC/DMA status registers to the log for TX-timeout
 * debugging.
 */
4164 static void tg3_dump_short_state(struct tg3 *tp)
4166 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4167 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4168 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4169 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
/* dev->tx_timeout callback: log diagnostic state (if enabled) and defer
 * a full chip reset to tg3_reset_task.
 */
4172 static void tg3_tx_timeout(struct net_device *dev)
4174 struct tg3 *tp = netdev_priv(dev);
4176 if (netif_msg_tx_err(tp)) {
4177 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4179 tg3_dump_short_state(tp);
4182 schedule_work(&tp->reset_task);
4185 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4186 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4188 u32 base = (u32) mapping & 0xffffffff;
4190 return ((base > 0xffffdcc0) &&
4191 (base + len + 8 < base));
4194 /* Test for DMA addresses > 40-bit */
/* Returns non-zero when the mapped buffer's end exceeds the 40-bit DMA
 * limit on chips with the 40-bit DMA bug.  Only meaningful on 64-bit
 * highmem configs; the non-highmem arm is elided in this view.
 */
4195 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4198 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4199 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4200 return (((u64) mapping + len) > DMA_40BIT_MASK);
4207 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4209 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Work around the 4GB-boundary and 40-bit DMA hardware bugs by copying
 * the skb into a freshly allocated linear skb whose mapping is safe,
 * replacing the descriptors from *start to last_plus_one and unmapping
 * the old fragments.  Returns non-zero (elided) if the copy/mapping
 * fails, in which case the packet is dropped.
 */
4210 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4211 u32 last_plus_one, u32 *start,
4212 u32 base_flags, u32 mss)
4214 struct sk_buff *new_skb;
4215 dma_addr_t new_addr = 0;
/* 5701 needs the data 4-byte aligned, hence the expanded copy path. */
4219 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4220 new_skb = skb_copy(skb, GFP_ATOMIC);
4222 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4224 new_skb = skb_copy_expand(skb,
4225 skb_headroom(skb) + more_headroom,
4226 skb_tailroom(skb), GFP_ATOMIC);
4232 /* New SKB is guaranteed to be linear. */
4234 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4236 /* Make sure new skb does not cross any 4G boundaries.
4237 * Drop the packet if it does.
4239 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4241 dev_kfree_skb(new_skb);
4244 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4245 base_flags, 1 | (mss << 1));
4246 *start = NEXT_TX(entry);
4250 /* Now clean up the sw ring entries. */
4252 while (entry != last_plus_one) {
4256 len = skb_headlen(skb);
4258 len = skb_shinfo(skb)->frags[i-1].size;
4259 pci_unmap_single(tp->pdev,
4260 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4261 len, PCI_DMA_TODEVICE);
/* First ring slot takes ownership of the replacement skb; the rest
 * are cleared.
 */
4263 tp->tx_buffers[entry].skb = new_skb;
4264 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4266 tp->tx_buffers[entry].skb = NULL;
4268 entry = NEXT_TX(entry);
/* Fill one TX descriptor.  mss_and_is_end packs the MSS in the upper
 * bits and the "last fragment" flag in bit 0; the END flag and VLAN tag
 * handling between the visible lines is partly elided in this view.
 */
4277 static void tg3_set_txd(struct tg3 *tp, int entry,
4278 dma_addr_t mapping, int len, u32 flags,
4281 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4282 int is_end = (mss_and_is_end & 0x1);
4283 u32 mss = (mss_and_is_end >> 1);
4287 flags |= TXD_FLAG_END;
4288 if (flags & TXD_FLAG_VLAN) {
4289 vlan_tag = flags >> 16;
4292 vlan_tag |= (mss << TXD_MSS_SHIFT);
/* Split the 64-bit DMA address across the two descriptor words. */
4294 txd->addr_hi = ((u64) mapping >> 32);
4295 txd->addr_lo = ((u64) mapping & 0xffffffff);
4296 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4297 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4300 /* hard_start_xmit for devices that don't have any bugs and
4301 * support TG3_FLG2_HW_TSO_2 only.
/* hard_start_xmit for bug-free chips supporting HW TSO 2.  Maps the
 * linear data and each page fragment, writes TX descriptors, then kicks
 * the hardware producer mailbox.  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
4303 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4305 struct tg3 *tp = netdev_priv(dev);
4307 u32 len, entry, base_flags, mss;
4309 len = skb_headlen(skb);
4311 /* We are running in BH disabled context with netif_tx_lock
4312 * and TX reclaim runs via tp->napi.poll inside of a software
4313 * interrupt. Furthermore, IRQ processing runs lockless so we have
4314 * no IRQ context deadlocks to worry about either. Rejoice!
4316 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4317 if (!netif_queue_stopped(dev)) {
4318 netif_stop_queue(dev);
4320 /* This is a hard error, log it. */
4321 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4322 "queue awake!\n", dev->name);
4324 return NETDEV_TX_BUSY;
4327 entry = tp->tx_prod;
/* TSO setup: compute the pseudo-header and encode header lengths into
 * the mss field for the hardware.
 */
4330 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4331 int tcp_opt_len, ip_tcp_len;
4333 if (skb_header_cloned(skb) &&
4334 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4339 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4340 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4342 struct iphdr *iph = ip_hdr(skb);
4344 tcp_opt_len = tcp_optlen(skb);
4345 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4348 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4349 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4352 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4353 TXD_FLAG_CPU_POST_DMA);
4355 tcp_hdr(skb)->check = 0;
4358 else if (skb->ip_summed == CHECKSUM_PARTIAL)
4359 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4360 #if TG3_VLAN_TAG_USED
4361 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4362 base_flags |= (TXD_FLAG_VLAN |
4363 (vlan_tx_tag_get(skb) << 16));
4366 /* Queue skb data, a.k.a. the main skb fragment. */
4367 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4369 tp->tx_buffers[entry].skb = skb;
4370 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4372 tg3_set_txd(tp, entry, mapping, len, base_flags,
4373 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4375 entry = NEXT_TX(entry);
4377 /* Now loop through additional data fragments, and queue them. */
4378 if (skb_shinfo(skb)->nr_frags > 0) {
4379 unsigned int i, last;
4381 last = skb_shinfo(skb)->nr_frags - 1;
4382 for (i = 0; i <= last; i++) {
4383 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4386 mapping = pci_map_page(tp->pdev,
4389 len, PCI_DMA_TODEVICE);
4391 tp->tx_buffers[entry].skb = NULL;
4392 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4394 tg3_set_txd(tp, entry, mapping, len,
4395 base_flags, (i == last) | (mss << 1));
4397 entry = NEXT_TX(entry);
4401 /* Packets are ready, update Tx producer idx local and on card. */
4402 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4404 tp->tx_prod = entry;
/* Stop the queue when nearly full; re-wake if reclaim already freed
 * enough descriptors (avoids a stop/wake race).
 */
4405 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4406 netif_stop_queue(dev);
4407 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4408 netif_wake_queue(tp->dev);
4414 dev->trans_start = jiffies;
4416 return NETDEV_TX_OK;
4419 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4421 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4422 * TSO header is greater than 80 bytes.
/* Workaround for a rare TSO bug with headers > 80 bytes: software-GSO
 * the skb and transmit each resulting segment through the dma-bug xmit
 * path.  Stops the queue first if the ring cannot hold the worst-case
 * segment count.
 */
4424 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4426 struct sk_buff *segs, *nskb;
4428 /* Estimate the number of fragments in the worst case */
4429 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4430 netif_stop_queue(tp->dev);
4431 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4432 return NETDEV_TX_BUSY;
4434 netif_wake_queue(tp->dev);
/* Segment with TSO masked off so the stack produces plain packets. */
4437 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4439 goto tg3_tso_bug_end;
4445 tg3_start_xmit_dma_bug(nskb, tp->dev);
4451 return NETDEV_TX_OK;
4454 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4455 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
/* hard_start_xmit for chips with the 4GB-boundary and/or 40-bit DMA
 * bugs (HW TSO 1 or firmware TSO).  Same flow as tg3_start_xmit(), but
 * every mapping is tested for the hardware bugs; if any would trigger,
 * the whole packet is rerouted through tigon3_dma_hwbug_workaround().
 */
4457 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4459 struct tg3 *tp = netdev_priv(dev);
4461 u32 len, entry, base_flags, mss;
4462 int would_hit_hwbug;
4464 len = skb_headlen(skb);
4466 /* We are running in BH disabled context with netif_tx_lock
4467 * and TX reclaim runs via tp->napi.poll inside of a software
4468 * interrupt. Furthermore, IRQ processing runs lockless so we have
4469 * no IRQ context deadlocks to worry about either. Rejoice!
4471 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4472 if (!netif_queue_stopped(dev)) {
4473 netif_stop_queue(dev);
4475 /* This is a hard error, log it. */
4476 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4477 "queue awake!\n", dev->name);
4479 return NETDEV_TX_BUSY;
4482 entry = tp->tx_prod;
4484 if (skb->ip_summed == CHECKSUM_PARTIAL)
4485 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4487 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4489 int tcp_opt_len, ip_tcp_len, hdr_len;
4491 if (skb_header_cloned(skb) &&
4492 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4497 tcp_opt_len = tcp_optlen(skb);
4498 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
/* Oversized TSO header on affected chips: fall back to software GSO. */
4500 hdr_len = ip_tcp_len + tcp_opt_len;
4501 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4502 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4503 return (tg3_tso_bug(tp, skb));
4505 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4506 TXD_FLAG_CPU_POST_DMA);
4510 iph->tot_len = htons(mss + hdr_len);
4511 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4512 tcp_hdr(skb)->check = 0;
4513 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4515 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
/* Encode IP/TCP option lengths for the chip's TSO engine; field
 * placement differs between HW-TSO and firmware-TSO chips.
 */
4520 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4521 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4522 if (tcp_opt_len || iph->ihl > 5) {
4525 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4526 mss |= (tsflags << 11);
4529 if (tcp_opt_len || iph->ihl > 5) {
4532 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4533 base_flags |= tsflags << 12;
4537 #if TG3_VLAN_TAG_USED
4538 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4539 base_flags |= (TXD_FLAG_VLAN |
4540 (vlan_tx_tag_get(skb) << 16));
4543 /* Queue skb data, a.k.a. the main skb fragment. */
4544 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4546 tp->tx_buffers[entry].skb = skb;
4547 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4549 would_hit_hwbug = 0;
4551 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4552 would_hit_hwbug = 1;
4553 else if (tg3_4g_overflow_test(mapping, len))
4554 would_hit_hwbug = 1;
4556 tg3_set_txd(tp, entry, mapping, len, base_flags,
4557 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4559 entry = NEXT_TX(entry);
4561 /* Now loop through additional data fragments, and queue them. */
4562 if (skb_shinfo(skb)->nr_frags > 0) {
4563 unsigned int i, last;
4565 last = skb_shinfo(skb)->nr_frags - 1;
4566 for (i = 0; i <= last; i++) {
4567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4570 mapping = pci_map_page(tp->pdev,
4573 len, PCI_DMA_TODEVICE);
4575 tp->tx_buffers[entry].skb = NULL;
4576 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4578 if (tg3_4g_overflow_test(mapping, len))
4579 would_hit_hwbug = 1;
4581 if (tg3_40bit_overflow_test(tp, mapping, len))
4582 would_hit_hwbug = 1;
4584 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4585 tg3_set_txd(tp, entry, mapping, len,
4586 base_flags, (i == last)|(mss << 1));
4588 tg3_set_txd(tp, entry, mapping, len,
4589 base_flags, (i == last));
4591 entry = NEXT_TX(entry);
4595 if (would_hit_hwbug) {
4596 u32 last_plus_one = entry;
4599 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4600 start &= (TG3_TX_RING_SIZE - 1);
4602 /* If the workaround fails due to memory/mapping
4603 * failure, silently drop this packet.
4605 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4606 &start, base_flags, mss))
4612 /* Packets are ready, update Tx producer idx local and on card. */
4613 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4615 tp->tx_prod = entry;
4616 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4617 netif_stop_queue(dev);
4618 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4619 netif_wake_queue(tp->dev);
4625 dev->trans_start = jiffies;
4627 return NETDEV_TX_OK;
/* Apply MTU-dependent settings: enable the jumbo RX ring above standard
 * Ethernet MTU (and disable TSO on 5780-class chips, which cannot do
 * both), or restore TSO capability and disable the jumbo ring.
 */
4630 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4635 if (new_mtu > ETH_DATA_LEN) {
4636 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4637 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4638 ethtool_op_set_tso(dev, 0);
4641 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4643 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4644 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4645 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* ndo_change_mtu handler.  Validates the requested MTU, then — if the
 * interface is running — halts the chip, applies the new MTU settings,
 * and restarts the hardware under the full lock.
 */
4649 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4651 struct tg3 *tp = netdev_priv(dev);
4654 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4657 if (!netif_running(dev)) {
4658 /* We'll just catch it later when the
4661 tg3_set_mtu(dev, tp, new_mtu);
4667 tg3_full_lock(tp, 1);
4669 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4671 tg3_set_mtu(dev, tp, new_mtu);
4673 err = tg3_restart_hw(tp, 0);
4676 tg3_netif_start(tp);
4678 tg3_full_unlock(tp);
4683 /* Free up pending packets in all rx/tx rings.
4685 * The chip has been shut down and the driver detached from
4686 * the networking, so no interrupts or new tx packets will
4687 * end up in the driver. tp->{tx,}lock is not held and we are not
4688 * in an interrupt context and thus may sleep.
/* Unmap and free every pending skb in the standard RX, jumbo RX, and TX
 * rings.  Called with the chip shut down and the device detached, so no
 * new work can arrive; may sleep.
 */
4690 static void tg3_free_rings(struct tg3 *tp)
4692 struct ring_info *rxp;
4695 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4696 rxp = &tp->rx_std_buffers[i];
4698 if (rxp->skb == NULL)
4700 pci_unmap_single(tp->pdev,
4701 pci_unmap_addr(rxp, mapping),
4702 tp->rx_pkt_buf_sz - tp->rx_offset,
4703 PCI_DMA_FROMDEVICE);
4704 dev_kfree_skb_any(rxp->skb);
4708 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4709 rxp = &tp->rx_jumbo_buffers[i];
4711 if (rxp->skb == NULL)
4713 pci_unmap_single(tp->pdev,
4714 pci_unmap_addr(rxp, mapping),
4715 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4716 PCI_DMA_FROMDEVICE);
4717 dev_kfree_skb_any(rxp->skb);
/* TX ring: one skb may span several slots (head + page frags), so the
 * index advances inside the loop body.
 */
4721 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4722 struct tx_ring_info *txp;
4723 struct sk_buff *skb;
4726 txp = &tp->tx_buffers[i];
4734 pci_unmap_single(tp->pdev,
4735 pci_unmap_addr(txp, mapping),
4742 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4743 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4744 pci_unmap_page(tp->pdev,
4745 pci_unmap_addr(txp, mapping),
4746 skb_shinfo(skb)->frags[j].size,
4751 dev_kfree_skb_any(skb);
4755 /* Initialize tx/rx rings for packet processing.
4757 * The chip has been shut down and the driver detached from
4758 * the networking, so no interrupts or new tx packets will
4759 * end up in the driver. tp->{tx,}lock are held and thus
/* Initialize all rings for packet processing: free old skbs, zero the
 * descriptor memory, fill in the invariant RX descriptor fields, then
 * allocate fresh RX skbs up to the configured pending counts.  Called
 * with the chip shut down; tp->{tx,}lock held.
 */
4762 static int tg3_init_rings(struct tg3 *tp)
4766 /* Free up all the SKBs. */
4769 /* Zero out all descriptors. */
4770 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4771 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4772 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4773 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips use jumbo-sized buffers in the standard ring when
 * running with a jumbo MTU.
 */
4775 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4776 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4777 (tp->dev->mtu > ETH_DATA_LEN))
4778 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4780 /* Initialize invariants of the rings, we only set this
4781 * stuff once. This works because the card does not
4782 * write into the rx buffer posting rings.
4784 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4785 struct tg3_rx_buffer_desc *rxd;
4787 rxd = &tp->rx_std[i];
4788 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4790 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4791 rxd->opaque = (RXD_OPAQUE_RING_STD |
4792 (i << RXD_OPAQUE_INDEX_SHIFT));
4795 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4796 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4797 struct tg3_rx_buffer_desc *rxd;
4799 rxd = &tp->rx_jumbo[i];
4800 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4802 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4804 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4805 (i << RXD_OPAQUE_INDEX_SHIFT));
4809 /* Now allocate fresh SKBs for each rx ring. */
4810 for (i = 0; i < tp->rx_pending; i++) {
4811 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4812 printk(KERN_WARNING PFX
4813 "%s: Using a smaller RX standard ring, "
4814 "only %d out of %d buffers were allocated "
4816 tp->dev->name, i, tp->rx_pending);
4824 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4825 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4826 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4828 printk(KERN_WARNING PFX
4829 "%s: Using a smaller RX jumbo ring, "
4830 "only %d out of %d buffers were "
4831 "allocated successfully.\n",
4832 tp->dev->name, i, tp->rx_jumbo_pending);
4837 tp->rx_jumbo_pending = i;
4846 * Must not be invoked with interrupt sources disabled and
4847 * the hardware shut down.
/* Release all DMA-consistent ring/status/stats memory and the combined
 * software ring-info allocation.  Pointers are NULLed (some NULL stores
 * are elided in this view) so a later re-alloc starts clean.
 */
4849 static void tg3_free_consistent(struct tg3 *tp)
4851 kfree(tp->rx_std_buffers);
4852 tp->rx_std_buffers = NULL;
4854 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4855 tp->rx_std, tp->rx_std_mapping);
4859 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4860 tp->rx_jumbo, tp->rx_jumbo_mapping);
4861 tp->rx_jumbo = NULL;
4864 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4865 tp->rx_rcb, tp->rx_rcb_mapping);
4869 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4870 tp->tx_ring, tp->tx_desc_mapping);
4873 if (tp->hw_status) {
4874 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4875 tp->hw_status, tp->status_mapping);
4876 tp->hw_status = NULL;
4879 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4880 tp->hw_stats, tp->stats_mapping);
4881 tp->hw_stats = NULL;
4886 * Must not be invoked with interrupt sources disabled and
4887 * the hardware shut down. Can sleep.
/* Allocate all ring, status-block, and statistics memory.  The three
 * software ring-info arrays share one kzalloc; the descriptor rings and
 * hardware blocks each get DMA-consistent memory.  On any failure,
 * tg3_free_consistent() tears everything down (error label elided).
 */
4889 static int tg3_alloc_consistent(struct tg3 *tp)
4891 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4893 TG3_RX_JUMBO_RING_SIZE)) +
4894 (sizeof(struct tx_ring_info) *
4897 if (!tp->rx_std_buffers)
/* Carve the jumbo RX and TX info arrays out of the single allocation. */
4900 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4901 tp->tx_buffers = (struct tx_ring_info *)
4902 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4904 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4905 &tp->rx_std_mapping);
4909 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4910 &tp->rx_jumbo_mapping);
4915 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4916 &tp->rx_rcb_mapping);
4920 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4921 &tp->tx_desc_mapping);
4925 tp->hw_status = pci_alloc_consistent(tp->pdev,
4927 &tp->status_mapping);
4931 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4932 sizeof(struct tg3_hw_stats),
4933 &tp->stats_mapping);
4937 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4938 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Error path (label elided): release everything allocated so far. */
4943 tg3_free_consistent(tp);
4947 #define MAX_WAIT_CNT 1000
4949 /* To stop a block, clear the enable bit and poll till it
4950 * clears. tp->lock is held.
/* Stop one hardware block: clear its enable bit and poll until the bit
 * reads back clear.  On 5705-class chips some blocks cannot be toggled
 * and are treated as success.  Logs a timeout unless silent; tp->lock
 * held.
 */
4952 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4957 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4964 /* We can't enable/disable these bits of the
4965 * 5705/5750, just say success.
4978 for (i = 0; i < MAX_WAIT_CNT; i++) {
4981 if ((val & enable_bit) == 0)
4985 if (i == MAX_WAIT_CNT && !silent) {
4986 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4987 "ofs=%lx enable_bit=%x\n",
4995 /* tp->lock is held. */
/* Orderly shutdown of the chip's DMA engines: disable interrupts and RX
 * mode, stop the receive pipeline blocks, then the send pipeline, wait
 * for the TX MAC to drain, stop host coalescing / DMA / buffer manager
 * blocks, reset the FTQ, and finally clear the status/stats blocks.
 * Returns the OR of all tg3_stop_block() errors.  tp->lock held.
 */
4996 static int tg3_abort_hw(struct tg3 *tp, int silent)
5000 tg3_disable_ints(tp);
5002 tp->rx_mode &= ~RX_MODE_ENABLE;
5003 tw32_f(MAC_RX_MODE, tp->rx_mode);
5006 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5007 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5008 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5009 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5010 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5011 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5013 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5014 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5015 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5016 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5017 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5018 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5019 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5021 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5022 tw32_f(MAC_MODE, tp->mac_mode);
5025 tp->tx_mode &= ~TX_MODE_ENABLE;
5026 tw32_f(MAC_TX_MODE, tp->tx_mode);
5028 for (i = 0; i < MAX_WAIT_CNT; i++) {
5030 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5033 if (i >= MAX_WAIT_CNT) {
5034 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5035 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5036 tp->dev->name, tr32(MAC_TX_MODE));
5040 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5041 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5042 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse FTQ_RESET to flush the flow-through queues. */
5044 tw32(FTQ_RESET, 0xffffffff);
5045 tw32(FTQ_RESET, 0x00000000);
5047 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5048 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5051 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5053 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5058 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration lock (recursive via
 * nvram_lock_cnt).  Requests SWARB and polls for the grant; on timeout
 * the request is withdrawn and an error returned (elided in this view).
 * tp->lock held.
 */
5059 static int tg3_nvram_lock(struct tg3 *tp)
5061 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5064 if (tp->nvram_lock_cnt == 0) {
5065 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5066 for (i = 0; i < 8000; i++) {
5067 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5072 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5076 tp->nvram_lock_cnt++;
5081 /* tp->lock is held. */
/* Drop one reference on the NVRAM arbitration lock; release the
 * hardware grant when the count reaches zero.  tp->lock held.
 */
5082 static void tg3_nvram_unlock(struct tg3 *tp)
5084 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5085 if (tp->nvram_lock_cnt > 0)
5086 tp->nvram_lock_cnt--;
5087 if (tp->nvram_lock_cnt == 0)
5088 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5092 /* tp->lock is held. */
/* Set the NVRAM access-enable bit on 5750+ chips that do not protect
 * their NVRAM.  tp->lock held.
 */
5093 static void tg3_enable_nvram_access(struct tg3 *tp)
5095 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5096 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5097 u32 nvaccess = tr32(NVRAM_ACCESS);
5099 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5103 /* tp->lock is held. */
/* Clear the NVRAM access-enable bit; mirror of
 * tg3_enable_nvram_access().  tp->lock held.
 */
5104 static void tg3_disable_nvram_access(struct tg3 *tp)
5106 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5107 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5108 u32 nvaccess = tr32(NVRAM_ACCESS);
5110 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Post an event to the APE management processor.  Verifies the APE
 * firmware signature and readiness, waits (bounded) for any previous
 * event to be consumed, writes the new event under the APE memory lock,
 * then rings the APE doorbell.  Silently gives up if the APE never
 * becomes ready.
 */
5114 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5119 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5120 if (apedata != APE_SEG_SIG_MAGIC)
5123 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5124 if (apedata != APE_FW_STATUS_READY)
5127 /* Wait for up to 1 millisecond for APE to service previous event. */
5128 for (i = 0; i < 10; i++) {
5129 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5132 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5134 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5135 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5136 event | APE_EVENT_STATUS_EVENT_PENDING);
5138 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5140 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5146 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5147 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/* Notify the APE of a driver state transition (init/shutdown/suspend).
 * On init, first publishes the host segment signature, driver id, and
 * behavior flags.  No-op unless the APE is enabled.
 */
5150 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5155 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5159 case RESET_KIND_INIT:
5160 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5161 APE_HOST_SEG_SIG_MAGIC);
5162 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5163 APE_HOST_SEG_LEN_MAGIC);
5164 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5165 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5166 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5167 APE_HOST_DRIVER_ID_MAGIC);
5168 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5169 APE_HOST_BEHAV_NO_PHYLOCK);
5171 event = APE_EVENT_STATUS_STATE_START;
5173 case RESET_KIND_SHUTDOWN:
5174 event = APE_EVENT_STATUS_STATE_UNLOAD;
5176 case RESET_KIND_SUSPEND:
5177 event = APE_EVENT_STATUS_STATE_SUSPEND;
5183 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5185 tg3_ape_send_event(tp, event);
5188 /* tp->lock is held. */
/* Before a chip reset: write the firmware mailbox magic, record the
 * driver state in NIC SRAM when the new ASF handshake is in use, and
 * forward init/suspend transitions to the APE.  tp->lock held.
 */
5189 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5191 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5192 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5194 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5196 case RESET_KIND_INIT:
5197 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5201 case RESET_KIND_SHUTDOWN:
5202 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5206 case RESET_KIND_SUSPEND:
5207 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5216 if (kind == RESET_KIND_INIT ||
5217 kind == RESET_KIND_SUSPEND)
5218 tg3_ape_driver_state_change(tp, kind);
5221 /* tp->lock is held. */
/* Signal ASF/APE firmware that a chip reset of type @kind has completed.
 * With the new ASF handshake, records the "*_DONE" driver state in the
 * state mailbox; for SHUTDOWN it also forwards the transition to the APE
 * (the pre-reset counterpart handles INIT/SUSPEND).
 */
5222 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5224 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5226 case RESET_KIND_INIT:
5227 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5228 DRV_STATE_START_DONE);
5231 case RESET_KIND_SHUTDOWN:
5232 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5233 DRV_STATE_UNLOAD_DONE);
5241 if (kind == RESET_KIND_SHUTDOWN)
5242 tg3_ape_driver_state_change(tp, kind);
5245 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: record the driver state in
 * the firmware state mailbox for each reset kind, only when ASF is
 * enabled.  NOTE(review): the DRV_STATE_* value lines and break
 * statements are among the original lines absent from this extract.
 */
5246 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5248 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5250 case RESET_KIND_INIT:
5251 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5255 case RESET_KIND_SHUTDOWN:
5256 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5260 case RESET_KIND_SUSPEND:
5261 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for bootcode/firmware to finish initializing after a reset.
 * 5906 parts poll VCPU_STATUS for INIT_DONE (200 iterations, "up to
 * 20ms" per the original comment); all others poll the firmware mailbox
 * for the complemented magic value.  A timeout is not treated as fatal
 * because some (e.g. Sun onboard) parts legitimately ship without
 * firmware - it is logged once via TG3_FLG2_NO_FWARE_REPORTED.
 * NOTE(review): delay calls, braces and the return statements are among
 * the original lines absent from this extract.
 */
5271 static int tg3_poll_fw(struct tg3 *tp)
5276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5277 /* Wait up to 20ms for init done. */
5278 for (i = 0; i < 200; i++) {
5279 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5286 /* Wait for firmware initialization to complete. */
5287 for (i = 0; i < 100000; i++) {
5288 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
/* Bootcode writes back the bitwise complement of the magic when done. */
5289 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5294 /* Chip might not be fitted with firmware. Some Sun onboard
5295 * parts are configured like that. So don't signal the timeout
5296 * of the above loop as an error, but do report the lack of
5297 * running firmware once.
5300 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5301 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5303 printk(KERN_INFO PFX "%s: No firmware running.\n",
5310 /* Save PCI command register before chip reset */
/* Snapshot PCI_COMMAND into tp->pci_cmd; the GRC core-clock reset can
 * clear bits in it (see the comment in tg3_chip_reset), so it is
 * restored afterwards by tg3_restore_pci_state(). */
5311 static void tg3_save_pci_state(struct tg3 *tp)
5313 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5316 /* Restore PCI state after chip reset */
/* Rebuild the PCI configuration state that the chip reset clobbered:
 * indirect-access enable, PCISTATE retry/APE-access bits, the saved
 * PCI_COMMAND word, PCIe read-request size, cacheline size, latency
 * timer, the PCI-X relaxed-ordering bit, and (on 5780-class parts with
 * MSI) the MSI enable bit plus MSGINT_MODE.
 */
5317 static void tg3_restore_pci_state(struct tg3 *tp)
5321 /* Re-enable indirect register accesses. */
5322 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5323 tp->misc_host_ctrl);
5325 /* Set MAX PCI retry to zero. */
5326 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
/* 5704 A0 in PCI-X mode needs the same-DMA retry workaround bit. */
5327 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5328 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5329 val |= PCISTATE_RETRY_SAME_DMA;
5330 /* Allow reads and writes to the APE register and memory space. */
5331 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5332 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5333 PCISTATE_ALLOW_APE_SHMEM_WR;
5334 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Put back the PCI_COMMAND word captured by tg3_save_pci_state(). */
5336 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5338 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5339 pcie_set_readrq(tp->pdev, 4096);
5341 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5342 tp->pci_cacheline_sz);
5343 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5347 /* Make sure PCI-X relaxed ordering bit is clear. */
5351 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5353 pcix_cmd &= ~PCI_X_CMD_ERO;
5354 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5358 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5360 /* Chip reset on 5780 will reset MSI enable bit,
5361 * so need to restore it.
5363 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5366 pci_read_config_word(tp->pdev,
5367 tp->msi_cap + PCI_MSI_FLAGS,
5369 pci_write_config_word(tp->pdev,
5370 tp->msi_cap + PCI_MSI_FLAGS,
5371 ctrl | PCI_MSI_FLAGS_ENABLE);
/* Also re-enable the chip's own message-interrupt mode register. */
5372 val = tr32(MSGINT_MODE);
5373 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5378 static void tg3_stop_fw(struct tg3 *);
5380 /* tp->lock is held. */
/* Perform a full GRC core-clock chip reset and bring the device back to
 * a state where register/config accesses work again.  Sequence: save
 * PCI state, swap out the 5701 posted-write workaround, quiesce the irq
 * handler, issue GRC_MISC_CFG_CORECLK_RESET (with chip-specific
 * pre/post fixups), restore PCI state, re-enable the memory arbiter,
 * restore GRC/clock/MAC modes, wait for firmware, and re-probe the ASF
 * enable state from NVRAM shadow memory.  The exact statement order is
 * load-bearing throughout - do not reorder.
 * NOTE(review): many original lines (returns, braces, udelay calls,
 * local declarations) are absent from this extract; the leading numbers
 * are original file line numbers.
 */
5381 static int tg3_chip_reset(struct tg3 *tp)
5384 void (*write_op)(struct tg3 *, u32, u32);
5389 /* No matching tg3_nvram_unlock() after this because
5390 * chip reset below will undo the nvram lock.
5392 tp->nvram_lock_cnt = 0;
5394 /* GRC_MISC_CFG core clock reset will clear the memory
5395 * enable bit in PCI register 4 and the MSI enable bit
5396 * on some chips, so we save relevant registers here.
5398 tg3_save_pci_state(tp);
/* Clear the fastboot PC on parts that support it so the reset takes the
 * normal boot path. */
5400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5402 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5405 tw32(GRC_FASTBOOT_PC, 0);
5408 * We must avoid the readl() that normally takes place.
5409 * It locks machines, causes machine checks, and other
5410 * fun things. So, temporarily disable the 5701
5411 * hardware workaround, while we do the reset.
5413 write_op = tp->write32;
5414 if (write_op == tg3_write_flush_reg32)
5415 tp->write32 = tg3_write32;
5417 /* Prevent the irq handler from reading or writing PCI registers
5418 * during chip reset when the memory enable bit in the PCI command
5419 * register may be cleared. The chip does not generate interrupt
5420 * at this time, but the irq handler may still be called due to irq
5421 * sharing or irqpoll.
5423 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5424 if (tp->hw_status) {
5425 tp->hw_status->status = 0;
5426 tp->hw_status->status_tag = 0;
/* Make sure no in-flight irq handler is still touching the device. */
5430 synchronize_irq(tp->pdev->irq);
5433 val = GRC_MISC_CFG_CORECLK_RESET;
5435 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
/* 0x7e2c == 0x60 identifies a chip-specific PCIe condition needing the
 * bit-29 GRC_MISC_CFG write first - presumably a hw erratum; TODO confirm
 * against Broadcom documentation. */
5436 if (tr32(0x7e2c) == 0x60) {
5439 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5440 tw32(GRC_MISC_CFG, (1 << 29));
/* 5906: flag a driver-initiated reset to the VCPU and un-halt it. */
5445 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5446 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5447 tw32(GRC_VCPU_EXT_CTRL,
5448 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5451 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5452 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write triggers the actual core-clock reset. */
5453 tw32(GRC_MISC_CFG, val)
5455 /* restore 5701 hardware bug workaround write method */
5456 tp->write32 = write_op;
5458 /* Unfortunately, we have to delay before the PCI read back.
5459 * Some 575X chips even will not respond to a PCI cfg access
5460 * when the reset command is given to the chip.
5462 * How do these hardware designers expect things to work
5463 * properly if the PCI write is posted for a long period
5464 * of time? It is always necessary to have some method by
5465 * which a register read back can occur to push the write
5466 * out which does the reset.
5468 * For most tg3 variants the trick below was working.
5473 /* Flush PCI posted writes. The normal MMIO registers
5474 * are inaccessible at this time so this is the only
5475 * way to make this reliably (actually, this is no longer
5476 * the case, see above). I tried to use indirect
5477 * register read/write but this upset some 5701 variants.
5479 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5483 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5484 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5488 /* Wait for link training to complete. */
5489 for (i = 0; i < 5000; i++)
/* Bit 15 at config offset 0xc4 - undocumented PCIe fixup for 5750 A0;
 * NOTE(review): meaning not derivable from this extract. */
5492 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5493 pci_write_config_dword(tp->pdev, 0xc4,
5494 cfg_val | (1 << 15));
5496 /* Set PCIE max payload size and clear error status. */
5497 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5500 tg3_restore_pci_state(tp);
/* Config space is usable again; let the irq handler back in. */
5502 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5505 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5506 val = tr32(MEMARB_MODE);
5507 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5509 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5511 tw32(0x5000, 0x400);
5514 tw32(GRC_MODE, tp->grc_mode);
5516 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5519 tw32(0xc4, val | (1 << 15));
/* Mini-PCI 5705 needs CLKRUN output enabled (A0 also forces CLKRUN). */
5522 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5524 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5525 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5526 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5527 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Re-select the MAC port mode: TBI for fiber SerDes, GMII for MII
 * SerDes, otherwise leave it zero until link negotiation. */
5530 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5531 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5532 tw32_f(MAC_MODE, tp->mac_mode);
5533 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5534 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5535 tw32_f(MAC_MODE, tp->mac_mode);
5537 tw32_f(MAC_MODE, 0);
5540 err = tg3_poll_fw(tp);
5544 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5545 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5548 tw32(0x7c00, val | (1 << 25));
5551 /* Reprobe ASF enable state. */
5552 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5553 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5554 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5555 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5558 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5559 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5560 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5561 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5562 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5569 /* tp->lock is held. */
/* Ask the ASF firmware on the RX CPU to pause.  Only applies when ASF
 * is enabled and the APE is NOT (APE-managed parts use a different
 * mechanism).  Posts FWCMD_NICDRV_PAUSE_FW via the firmware command
 * mailbox and rings the RX-CPU driver-event bit, waiting for an ACK
 * both before and after posting.
 */
5570 static void tg3_stop_fw(struct tg3 *tp)
5572 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5573 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5576 /* Wait for RX cpu to ACK the previous event. */
5577 tg3_wait_for_event_ack(tp);
5579 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5580 val = tr32(GRC_RX_CPU_EVENT);
5581 val |= GRC_RX_CPU_DRIVER_EVENT;
5582 tw32(GRC_RX_CPU_EVENT, val);
5584 /* Wait for RX cpu to ACK this event. */
5585 tg3_wait_for_event_ack(tp);
5589 /* tp->lock is held. */
/* Fully stop the device: pre-reset firmware signalling, hardware abort,
 * chip reset, then legacy and post-reset firmware signalling.  @kind is
 * a RESET_KIND_* value, @silent suppresses abort-path diagnostics.
 * Returns the result of tg3_chip_reset() (error propagation lines are
 * among those absent from this extract).
 */
5590 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5596 tg3_write_sig_pre_reset(tp, kind);
5598 tg3_abort_hw(tp, silent);
5599 err = tg3_chip_reset(tp);
5601 tg3_write_sig_legacy(tp, kind);
5602 tg3_write_sig_post_reset(tp, kind);
/* Memory layout of the embedded 5701 A0 fixup firmware image (loaded by
 * tg3_load_5701_a0_firmware_fix).  Addresses are in the RX CPU's view;
 * only the low 16 bits are used as scratch offsets by
 * tg3_load_firmware_cpu.
 * NOTE(review): "RELASE" below is a long-standing typo for "RELEASE";
 * kept as-is since other code may reference the misspelled name. */
5610 #define TG3_FW_RELEASE_MAJOR 0x0
5611 #define TG3_FW_RELASE_MINOR 0x0
5612 #define TG3_FW_RELEASE_FIX 0x0
5613 #define TG3_FW_START_ADDR 0x08000000
5614 #define TG3_FW_TEXT_ADDR 0x08000000
5615 #define TG3_FW_TEXT_LEN 0x9c0
5616 #define TG3_FW_RODATA_ADDR 0x080009c0
5617 #define TG3_FW_RODATA_LEN 0x60
5618 #define TG3_FW_DATA_ADDR 0x08000a40
5619 #define TG3_FW_DATA_LEN 0x20
5620 #define TG3_FW_SBSS_ADDR 0x08000a60
5621 #define TG3_FW_SBSS_LEN 0xc
5622 #define TG3_FW_BSS_ADDR 0x08000a70
5623 #define TG3_FW_BSS_LEN 0x10
5625 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5626 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5627 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5628 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5629 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5630 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5631 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5632 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5633 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5634 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5635 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5636 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5637 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5638 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5639 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5640 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5641 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5642 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5643 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5644 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5645 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5646 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5647 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5648 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5649 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5650 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5652 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5653 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5654 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5655 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5656 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5657 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5658 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5659 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5660 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5661 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5662 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5663 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5664 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5665 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5666 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5667 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5668 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5669 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5670 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5671 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5672 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5673 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5674 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5675 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5676 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5677 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5678 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5679 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5680 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5681 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5682 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5683 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5684 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5685 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5686 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5687 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5688 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5689 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5690 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5691 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5692 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5693 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5694 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5695 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5696 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5697 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5698 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5699 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5700 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5701 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5702 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5703 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5704 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5705 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5706 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5707 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5708 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5709 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5710 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5711 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5712 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5713 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5714 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5715 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5716 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5719 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5720 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5721 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5722 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5723 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5727 #if 0 /* All zeros, don't eat up space with it. */
5728 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5729 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5730 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* On-chip scratch memory windows used when loading firmware into the
 * RX and TX CPUs (16 KiB each); see tg3_load_firmware_cpu. */
5734 #define RX_CPU_SCRATCH_BASE 0x30000
5735 #define RX_CPU_SCRATCH_SIZE 0x04000
5736 #define TX_CPU_SCRATCH_BASE 0x34000
5737 #define TX_CPU_SCRATCH_SIZE 0x04000
5739 /* tp->lock is held. */
/* Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  5705+ parts have no TX CPU, hence the BUG_ON; 5906
 * parts use the VCPU halt bit instead of CPU_MODE.  The RX CPU needs
 * repeated STATE/MODE writes plus a final flushed write; both paths
 * then poll up to 10000 iterations for CPU_MODE_HALT to latch, logging
 * an error on timeout.  Also clears the firmware's NVRAM arbitration
 * request so the driver can use NVRAM afterwards.
 * NOTE(review): returns, braces and udelay calls are among the original
 * lines absent from this extract.
 */
5740 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5744 BUG_ON(offset == TX_CPU_BASE &&
5745 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5748 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5750 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5753 if (offset == RX_CPU_BASE) {
5754 for (i = 0; i < 10000; i++) {
5755 tw32(offset + CPU_STATE, 0xffffffff);
5756 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5757 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final flushed halt write in case the loop above timed out. */
5761 tw32(offset + CPU_STATE, 0xffffffff);
5762 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5765 for (i = 0; i < 10000; i++) {
5766 tw32(offset + CPU_STATE, 0xffffffff);
5767 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5768 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5774 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5777 (offset == RX_CPU_BASE ? "RX" : "TX"));
5781 /* Clear firmware's nvram arbitration. */
5782 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5783 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Fields of struct fw_info: describes an embedded firmware image as
 * three sections (text / rodata / data), each with a CPU-view base
 * address, byte length, and a pointer to its u32 words (NULL means the
 * section is all zeros - see tg3_load_firmware_cpu).
 * NOTE(review): the "struct fw_info {" opening line is absent from this
 * extract. */
/* Executable section. */
5788 unsigned int text_base;
5789 unsigned int text_len;
5790 const u32 *text_data;
/* Read-only data section. */
5791 unsigned int rodata_base;
5792 unsigned int rodata_len;
5793 const u32 *rodata_data;
/* Initialized data section. */
5794 unsigned int data_base;
5795 unsigned int data_len;
5796 const u32 *data_data;
5799 /* tp->lock is held. */
/* Copy a firmware image described by @info into a CPU's scratch memory.
 * Refuses to load TX-CPU firmware on 5705+ (no TX CPU there).  Takes
 * the NVRAM lock first because bootcode may still be loading, halts the
 * target CPU, zeroes the whole scratch window, then writes each section
 * word-by-word; a NULL *_data pointer means "write zeros" for that
 * section.  Only the low 16 bits of each section base are used as the
 * offset within the scratch window.  Uses tg3_write_mem on 5705+ and
 * indirect register writes otherwise.
 * NOTE(review): returns, error-path lines and some loop-body lines are
 * absent from this extract.
 */
5800 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5801 int cpu_scratch_size, struct fw_info *info)
5803 int err, lock_err, i;
5804 void (*write_op)(struct tg3 *, u32, u32);
5806 if (cpu_base == TX_CPU_BASE &&
5807 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5808 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5809 "TX cpu firmware on %s which is 5705.\n",
5814 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5815 write_op = tg3_write_mem;
5817 write_op = tg3_write_indirect_reg32;
5819 /* It is possible that bootcode is still loading at this point.
5820 * Get the nvram lock first before halting the cpu.
5822 lock_err = tg3_nvram_lock(tp);
5823 err = tg3_halt_cpu(tp, cpu_base);
5825 tg3_nvram_unlock(tp);
/* Clear the full scratch window before loading the image. */
5829 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5830 write_op(tp, cpu_scratch_base + i, 0);
5831 tw32(cpu_base + CPU_STATE, 0xffffffff);
5832 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5833 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5834 write_op(tp, (cpu_scratch_base +
5835 (info->text_base & 0xffff) +
5838 info->text_data[i] : 0));
5839 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5840 write_op(tp, (cpu_scratch_base +
5841 (info->rodata_base & 0xffff) +
5843 (info->rodata_data ?
5844 info->rodata_data[i] : 0));
5845 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5846 write_op(tp, (cpu_scratch_base +
5847 (info->data_base & 0xffff) +
5850 info->data_data[i] : 0));
5858 /* tp->lock is held. */
/* Load the 5701 A0 fixup firmware (tg3FwText/tg3FwRodata; data section
 * is all zeros, hence data_data = NULL) into both the RX and TX CPU
 * scratch areas, then start only the RX CPU: set its PC to
 * TG3_FW_TEXT_ADDR, retry up to 5 times for the PC to stick, error out
 * if it never does, and finally clear CPU_MODE to let it run.
 * NOTE(review): returns, braces and delay calls are among the original
 * lines absent from this extract.
 */
5859 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5861 struct fw_info info;
5864 info.text_base = TG3_FW_TEXT_ADDR;
5865 info.text_len = TG3_FW_TEXT_LEN;
5866 info.text_data = &tg3FwText[0];
5867 info.rodata_base = TG3_FW_RODATA_ADDR;
5868 info.rodata_len = TG3_FW_RODATA_LEN;
5869 info.rodata_data = &tg3FwRodata[0];
5870 info.data_base = TG3_FW_DATA_ADDR;
5871 info.data_len = TG3_FW_DATA_LEN;
/* NULL => section is all zeros; tg3_load_firmware_cpu writes zeros. */
5872 info.data_data = NULL;
5874 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5875 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5880 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5881 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5886 /* Now startup only the RX cpu. */
5887 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5888 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5890 for (i = 0; i < 5; i++) {
5891 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
/* PC did not stick; re-halt and retry the PC write. */
5893 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5894 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5895 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5899 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5900 "to set RX CPU PC, is %08x should be %08x\n",
5901 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Release the RX CPU from halt so the firmware starts executing. */
5905 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5906 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
/* Memory layout of the embedded TSO offload firmware image (v1.6.0).
 * Same section scheme as the TG3_FW_* block above.
 * NOTE(review): "RELASE" below is a long-standing typo for "RELEASE";
 * kept as-is since other code may reference the misspelled name. */
5912 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5913 #define TG3_TSO_FW_RELASE_MINOR 0x6
5914 #define TG3_TSO_FW_RELEASE_FIX 0x0
5915 #define TG3_TSO_FW_START_ADDR 0x08000000
5916 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5917 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5918 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5919 #define TG3_TSO_FW_RODATA_LEN 0x60
5920 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5921 #define TG3_TSO_FW_DATA_LEN 0x30
5922 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5923 #define TG3_TSO_FW_SBSS_LEN 0x2c
5924 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5925 #define TG3_TSO_FW_BSS_LEN 0x894
5927 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5928 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5929 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5930 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5931 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5932 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5933 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5934 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5935 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5936 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5937 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5938 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5939 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5940 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5941 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5942 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5943 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5944 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5945 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5946 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5947 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5948 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5949 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5950 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5951 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5952 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5953 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5954 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5955 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5956 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5957 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5958 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5959 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5960 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5961 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5962 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5963 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5964 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5965 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5966 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5967 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5968 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5969 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5970 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5971 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5972 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5973 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5974 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5975 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5976 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5977 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5978 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5979 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5980 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5981 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5982 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5983 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5984 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5985 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5986 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5987 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5988 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5989 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5990 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5991 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5992 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5993 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5994 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5995 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5996 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5997 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5998 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5999 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6000 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6001 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6002 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6003 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6004 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6005 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6006 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6007 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6008 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6009 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6010 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6011 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6012 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6013 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6014 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6015 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6016 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6017 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6018 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6019 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6020 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6021 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6022 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6023 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6024 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6025 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6026 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6027 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6028 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6029 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6030 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6031 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6032 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6033 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6034 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6035 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6036 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6037 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6038 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6039 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6040 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6041 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6042 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6043 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6044 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6045 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6046 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6047 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6048 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6049 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6050 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6051 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6052 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6053 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6054 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6055 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6056 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6057 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6058 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6059 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6060 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6061 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6062 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6063 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6064 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6065 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6066 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6067 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6068 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6069 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6070 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6071 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6072 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6073 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6074 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6075 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6076 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6077 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6078 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6079 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6080 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6081 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6082 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6083 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6084 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6085 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6086 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6087 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6088 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6089 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6090 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6091 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6092 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6093 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6094 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6095 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6096 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6097 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6098 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6099 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6100 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6101 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6102 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6103 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6104 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6105 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6106 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6107 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6108 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6109 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6110 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6111 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6112 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6113 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6114 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6115 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6116 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6117 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6118 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6119 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6120 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6121 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6122 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6123 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6124 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6125 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6126 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6127 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6128 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6129 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6130 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6131 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6132 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6133 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6134 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6135 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6136 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6137 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6138 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6139 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6140 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6141 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6142 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6143 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6144 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6145 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6146 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6147 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6148 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6149 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6150 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6151 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6152 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6153 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6154 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6155 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6156 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6157 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6158 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6159 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6160 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6161 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6162 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6163 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6164 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6165 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6166 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6167 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6168 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6169 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6170 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6171 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6172 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6173 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6174 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6175 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6176 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6177 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6178 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6179 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6180 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6181 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6182 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6183 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6184 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6185 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6186 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6187 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6188 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6189 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6190 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6191 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6192 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6193 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6194 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6195 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6196 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6197 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6198 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6199 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6200 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6201 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6202 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6203 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6204 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6205 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6206 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6207 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6208 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6209 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6210 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6211 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
/* Read-only data section of the standard TSO firmware image.  The words
 * are big-endian ASCII message text used by the firmware itself (e.g.
 * 0x4d61696e/0x43707542 = "Main"/"CpuB", 0x73746b6f/0x66666c64 =
 * "stko"/"ffld", 0x66617461/0x6c457272 = "fata"/"lErr").  Loaded at
 * TG3_TSO_FW_RODATA_ADDR by tg3_load_tso_firmware().  Generated data —
 * do not edit by hand.
 */
6214 static const u32 tg3TsoFwRodata[] = {
6215 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6216 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6217 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6218 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
/* Initialized data section of the standard TSO firmware image; contains
 * the big-endian ASCII version tag "stkoffld_v1.6.0".  Loaded at
 * TG3_TSO_FW_DATA_ADDR by tg3_load_tso_firmware().  Generated data —
 * do not edit by hand.
 */
6222 static const u32 tg3TsoFwData[] = {
6223 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6224 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6228 /* 5705 needs a special version of the TSO firmware. */
/* Image layout of the 5705 TSO firmware: load addresses and byte
 * lengths of each section.  tg3_load_tso_firmware() uses the TEXT/
 * RODATA/DATA values to build its fw_info, and adds the SBSS/BSS
 * lengths when sizing the RX-CPU scratch area.
 */
6229 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
/* NOTE(review): "RELASE" below is a long-standing typo in the macro
 * name; kept as-is because other code may reference it by this name.
 */
6230 #define TG3_TSO5_FW_RELASE_MINOR 0x2
6231 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6232 #define TG3_TSO5_FW_START_ADDR 0x00010000
6233 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6234 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6235 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6236 #define TG3_TSO5_FW_RODATA_LEN 0x50
6237 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6238 #define TG3_TSO5_FW_DATA_LEN 0x20
6239 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6240 #define TG3_TSO5_FW_SBSS_LEN 0x28
6241 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6242 #define TG3_TSO5_FW_BSS_LEN 0x88
/* Instruction text of the 5705-specific TSO firmware, downloaded to the
 * chip's RX CPU by tg3_load_tso_firmware().  The words appear to be MIPS
 * machine code for the on-chip CPU — TODO confirm against Broadcom docs.
 * Generated data derived from proprietary Broadcom source (see the
 * copyright header); do not edit by hand.
 */
6244 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6245 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6246 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6247 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6248 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6249 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6250 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6251 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6252 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6253 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6254 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6255 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6256 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6257 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6258 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6259 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6260 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6261 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6262 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6263 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6264 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6265 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6266 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6267 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6268 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6269 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6270 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6271 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6272 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6273 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6274 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6275 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6276 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6277 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6278 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6279 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6280 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6281 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6282 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6283 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6284 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6285 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6286 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6287 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6288 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6289 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6290 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6291 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6292 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6293 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6294 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6295 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6296 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6297 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6298 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6299 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6300 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6301 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6302 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6303 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6304 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6305 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6306 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6307 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6308 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6309 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6310 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6311 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6312 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6313 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6314 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6315 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6316 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6317 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6318 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6319 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6320 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6321 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6322 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6323 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6324 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6325 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6326 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6327 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6328 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6329 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6330 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6331 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6332 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6333 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6334 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6335 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6336 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6337 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6338 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6339 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6340 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6341 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6342 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6343 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6344 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6345 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6346 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6347 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6348 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6349 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6350 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6351 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6352 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6353 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6354 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6355 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6356 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6357 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6358 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6359 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6360 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6361 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6362 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6363 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6364 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6365 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6366 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6367 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6368 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6369 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6370 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6371 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6372 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6373 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6374 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6375 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6376 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6377 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6378 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6379 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6380 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6381 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6382 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6383 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6384 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6385 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6386 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6387 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6388 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6389 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6390 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6391 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6392 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6393 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6394 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6395 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6396 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6397 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6398 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6399 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6400 0x00000000, 0x00000000, 0x00000000,
/* Read-only data section of the 5705 TSO firmware image.  Big-endian
 * ASCII message text ("Main"/"CpuB", "stko"/"ffld", "fata"/"lErr")
 * used by the firmware; loaded at TG3_TSO5_FW_RODATA_ADDR by
 * tg3_load_tso_firmware().  Generated data — do not edit by hand.
 */
6403 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6404 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6405 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6406 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6407 0x00000000, 0x00000000, 0x00000000,
/* Initialized data section of the 5705 TSO firmware image; contains the
 * big-endian ASCII version tag "stkoffld_v1.2.0".  Loaded at
 * TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  Generated data —
 * do not edit by hand.
 */
6410 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6411 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6412 0x00000000, 0x00000000, 0x00000000,
6415 /* tp->lock is held. */
6416 static int tg3_load_tso_firmware(struct tg3 *tp)
6418 struct fw_info info;
6419 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6422 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6426 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6427 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6428 info.text_data = &tg3Tso5FwText[0];
6429 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6430 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6431 info.rodata_data = &tg3Tso5FwRodata[0];
6432 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6433 info.data_len = TG3_TSO5_FW_DATA_LEN;
6434 info.data_data = &tg3Tso5FwData[0];
6435 cpu_base = RX_CPU_BASE;
6436 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6437 cpu_scratch_size = (info.text_len +
6440 TG3_TSO5_FW_SBSS_LEN +
6441 TG3_TSO5_FW_BSS_LEN);
6443 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6444 info.text_len = TG3_TSO_FW_TEXT_LEN;
6445 info.text_data = &tg3TsoFwText[0];
6446 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6447 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6448 info.rodata_data = &tg3TsoFwRodata[0];
6449 info.data_base = TG3_TSO_FW_DATA_ADDR;
6450 info.data_len = TG3_TSO_FW_DATA_LEN;
6451 info.data_data = &tg3TsoFwData[0];
6452 cpu_base = TX_CPU_BASE;
6453 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6454 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6457 err = tg3_load_firmware_cpu(tp, cpu_base,
6458 cpu_scratch_base, cpu_scratch_size,
6463 /* Now startup the cpu. */
6464 tw32(cpu_base + CPU_STATE, 0xffffffff);
6465 tw32_f(cpu_base + CPU_PC, info.text_base);
6467 for (i = 0; i < 5; i++) {
6468 if (tr32(cpu_base + CPU_PC) == info.text_base)
6470 tw32(cpu_base + CPU_STATE, 0xffffffff);
6471 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6472 tw32_f(cpu_base + CPU_PC, info.text_base);
6476 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6477 "to set CPU PC, is %08x should be %08x\n",
6478 tp->dev->name, tr32(cpu_base + CPU_PC),
6482 tw32(cpu_base + CPU_STATE, 0xffffffff);
6483 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6488 /* tp->lock is held. */
6489 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6491 u32 addr_high, addr_low;
6494 addr_high = ((tp->dev->dev_addr[0] << 8) |
6495 tp->dev->dev_addr[1]);
6496 addr_low = ((tp->dev->dev_addr[2] << 24) |
6497 (tp->dev->dev_addr[3] << 16) |
6498 (tp->dev->dev_addr[4] << 8) |
6499 (tp->dev->dev_addr[5] << 0));
6500 for (i = 0; i < 4; i++) {
6501 if (i == 1 && skip_mac_1)
6503 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6504 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6509 for (i = 0; i < 12; i++) {
6510 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6511 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6515 addr_high = (tp->dev->dev_addr[0] +
6516 tp->dev->dev_addr[1] +
6517 tp->dev->dev_addr[2] +
6518 tp->dev->dev_addr[3] +
6519 tp->dev->dev_addr[4] +
6520 tp->dev->dev_addr[5]) &
6521 TX_BACKOFF_SEED_MASK;
6522 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6525 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6527 struct tg3 *tp = netdev_priv(dev);
6528 struct sockaddr *addr = p;
6529 int err = 0, skip_mac_1 = 0;
6531 if (!is_valid_ether_addr(addr->sa_data))
6534 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6536 if (!netif_running(dev))
6539 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6540 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6542 addr0_high = tr32(MAC_ADDR_0_HIGH);
6543 addr0_low = tr32(MAC_ADDR_0_LOW);
6544 addr1_high = tr32(MAC_ADDR_1_HIGH);
6545 addr1_low = tr32(MAC_ADDR_1_LOW);
6547 /* Skip MAC addr 1 if ASF is using it. */
6548 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6549 !(addr1_high == 0 && addr1_low == 0))
6552 spin_lock_bh(&tp->lock);
6553 __tg3_set_mac_addr(tp, skip_mac_1);
6554 spin_unlock_bh(&tp->lock);
6559 /* tp->lock is held. */
6560 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6561 dma_addr_t mapping, u32 maxlen_flags,
6565 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6566 ((u64) mapping >> 32));
6568 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6569 ((u64) mapping & 0xffffffff));
6571 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6574 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6576 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6580 static void __tg3_set_rx_mode(struct net_device *);
6581 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6583 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6584 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6585 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6586 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6587 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6588 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6589 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6591 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6592 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6593 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6594 u32 val = ec->stats_block_coalesce_usecs;
6596 if (!netif_carrier_ok(tp->dev))
6599 tw32(HOSTCC_STAT_COAL_TICKS, val);
6603 /* tp->lock is held. */
/*
 * tg3_reset_hw() - reset the chip and bring the whole datapath back up:
 * chip reset, chipset/CPMU workarounds, ring and buffer-manager setup,
 * DMA engine modes, host coalescing, MAC/PHY configuration, and receive
 * rules.  Returns 0 on success or a negative errno from one of the
 * sub-steps.  Caller holds tp->lock.
 *
 * NOTE(review): this extract elides many interior lines (original file
 * numbering jumps); added comments describe only what the visible lines
 * establish.
 *
 * FIX: at original line 7088 the code tested a tg3_flags2 bit
 * (TG3_FLG2_TSO_CAPABLE) against the wrong flag word, tp->tg3_flags.
 * Changed to tp->tg3_flags2, matching the parallel RDMAC test at
 * original line 6944.  (Same class of bug as later upstream tg3
 * flag-word cleanups.)
 */
6604 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6606 u32 val, rdmac_mode;
6609 tg3_disable_ints(tp);
6613 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6615 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6616 tg3_abort_hw(tp, 1);
6620 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6623 err = tg3_chip_reset(tp);
6627 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
/* 5784 A0/A1 workaround: clear link-aware/link-idle CPMU modes and
 * force the 6.25MHz MAC clock selections in the CPMU registers. */
6629 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6630 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6631 val = tr32(TG3_CPMU_CTRL);
6632 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6633 tw32(TG3_CPMU_CTRL, val);
6635 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6636 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6637 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6638 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6640 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6641 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6642 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6643 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6645 val = tr32(TG3_CPMU_HST_ACC);
6646 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6647 val |= CPMU_HST_ACC_MACCLK_6_25;
6648 tw32(TG3_CPMU_HST_ACC, val);
6651 /* This works around an issue with Athlon chipsets on
6652 * B3 tigon3 silicon. This bit has no effect on any
6653 * other revision. But do not set this on PCI Express
6654 * chips and don't even touch the clocks if the CPMU is present.
6656 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6657 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6658 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6659 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* 5704 A0 in PCI-X mode: force retry of the same DMA transaction. */
6662 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6663 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6664 val = tr32(TG3PCI_PCISTATE);
6665 val |= PCISTATE_RETRY_SAME_DMA;
6666 tw32(TG3PCI_PCISTATE, val);
6669 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6670 /* Allow reads and writes to the
6671 * APE register and memory space.
6673 val = tr32(TG3PCI_PCISTATE);
6674 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6675 PCISTATE_ALLOW_APE_SHMEM_WR;
6676 tw32(TG3PCI_PCISTATE, val);
6679 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6680 /* Enable some hw fixes. */
6681 val = tr32(TG3PCI_MSI_DATA);
6682 val |= (1 << 26) | (1 << 28) | (1 << 29);
6683 tw32(TG3PCI_MSI_DATA, val);
6686 /* Descriptor ring init may make accesses to the
6687 * NIC SRAM area to setup the TX descriptors, so we
6688 * can only do this after the hardware has been
6689 * successfully reset.
6691 err = tg3_init_rings(tp);
6695 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6696 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6697 /* This value is determined during the probe time DMA
6698 * engine test, tg3_test_dma.
6700 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6703 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6704 GRC_MODE_4X_NIC_SEND_RINGS |
6705 GRC_MODE_NO_TX_PHDR_CSUM |
6706 GRC_MODE_NO_RX_PHDR_CSUM);
6707 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6709 /* Pseudo-header checksum is done by hardware logic and not
6710 * the offload processers, so make the chip do the pseudo-
6711 * header checksums on receive. For transmit it is more
6712 * convenient to do the pseudo-header checksum in software
6713 * as Linux does that on transmit for us in all cases.
6715 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6719 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6721 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6722 val = tr32(GRC_MISC_CFG);
6724 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6725 tw32(GRC_MISC_CFG, val);
6727 /* Initialize MBUF/DESC pool. */
6728 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6730 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6731 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6733 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6735 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6736 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6737 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
/* TSO firmware on 5705-class parts lives at the start of the mbuf
 * pool SRAM; carve its (128-byte aligned) footprint out of the pool. */
6739 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6742 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6743 TG3_TSO5_FW_RODATA_LEN +
6744 TG3_TSO5_FW_DATA_LEN +
6745 TG3_TSO5_FW_SBSS_LEN +
6746 TG3_TSO5_FW_BSS_LEN);
6747 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6748 tw32(BUFMGR_MB_POOL_ADDR,
6749 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6750 tw32(BUFMGR_MB_POOL_SIZE,
6751 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Buffer-manager watermarks: standard vs. jumbo MTU profiles. */
6754 if (tp->dev->mtu <= ETH_DATA_LEN) {
6755 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6756 tp->bufmgr_config.mbuf_read_dma_low_water);
6757 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6758 tp->bufmgr_config.mbuf_mac_rx_low_water);
6759 tw32(BUFMGR_MB_HIGH_WATER,
6760 tp->bufmgr_config.mbuf_high_water);
6762 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6763 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6764 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6765 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6766 tw32(BUFMGR_MB_HIGH_WATER,
6767 tp->bufmgr_config.mbuf_high_water_jumbo);
6769 tw32(BUFMGR_DMA_LOW_WATER,
6770 tp->bufmgr_config.dma_low_water);
6771 tw32(BUFMGR_DMA_HIGH_WATER,
6772 tp->bufmgr_config.dma_high_water);
/* Enable the buffer manager and poll (up to 2000 iterations) for the
 * enable bit to latch; log an error if it never does. */
6774 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6775 for (i = 0; i < 2000; i++) {
6776 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6781 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6786 /* Setup replenish threshold. */
6787 val = tp->rx_pending / 8;
6790 else if (val > tp->rx_std_max_post)
6791 val = tp->rx_std_max_post;
6792 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6793 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6794 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6796 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6797 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6800 tw32(RCVBDI_STD_THRESH, val);
6802 /* Initialize TG3_BDINFO's at:
6803 * RCVDBDI_STD_BD: standard eth size rx ring
6804 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6805 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6808 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6809 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6810 * ring attribute flags
6811 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6813 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6814 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6816 * The size of each ring is fixed in the firmware, but the location is
6819 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6820 ((u64) tp->rx_std_mapping >> 32));
6821 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6822 ((u64) tp->rx_std_mapping & 0xffffffff));
6823 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6824 NIC_SRAM_RX_BUFFER_DESC);
6826 /* Don't even try to program the JUMBO/MINI buffer descriptor
6829 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6830 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6831 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6833 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6834 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6836 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6837 BDINFO_FLAGS_DISABLED);
6839 /* Setup replenish threshold. */
6840 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6842 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6843 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6844 ((u64) tp->rx_jumbo_mapping >> 32));
6845 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6846 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6847 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6848 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6849 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6850 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6852 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6853 BDINFO_FLAGS_DISABLED);
6858 /* There is only one send ring on 5705/5750, no need to explicitly
6859 * disable the others.
6861 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6862 /* Clear out send RCB ring in SRAM. */
6863 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6864 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6865 BDINFO_FLAGS_DISABLED);
6870 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6871 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6873 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6874 tp->tx_desc_mapping,
6875 (TG3_TX_RING_SIZE <<
6876 BDINFO_FLAGS_MAXLEN_SHIFT),
6877 NIC_SRAM_TX_BUFFER_DESC);
6879 /* There is only one receive return ring on 5705/5750, no need
6880 * to explicitly disable the others.
6882 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6883 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6884 i += TG3_BDINFO_SIZE) {
6885 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6886 BDINFO_FLAGS_DISABLED);
6891 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6893 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6895 (TG3_RX_RCB_RING_SIZE(tp) <<
6896 BDINFO_FLAGS_MAXLEN_SHIFT),
/* Publish initial producer indices for the standard and jumbo rings. */
6899 tp->rx_std_ptr = tp->rx_pending;
6900 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6903 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6904 tp->rx_jumbo_pending : 0;
6905 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6908 /* Initialize MAC address and backoff seed. */
6909 __tg3_set_mac_addr(tp, 0);
6911 /* MTU + ethernet header + FCS + optional VLAN tag */
6912 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6914 /* The slot time is changed by tg3_setup_phy if we
6915 * run at gigabit with half duplex.
6917 tw32(MAC_TX_LENGTHS,
6918 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6919 (6 << TX_LENGTHS_IPG_SHIFT) |
6920 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6922 /* Receive rules. */
6923 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6924 tw32(RCVLPC_CONFIG, 0x0181);
6926 /* Calculate RDMAC_MODE setting early, we need it to determine
6927 * the RCVLPC_STATE_ENABLE mask.
6929 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6930 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6931 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6932 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6933 RDMAC_MODE_LNGREAD_ENAB);
6935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6936 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6937 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6938 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6940 /* If statement applies to 5705 and 5750 PCI devices only */
6941 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6942 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6943 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6944 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6946 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6947 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6948 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6949 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6953 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6954 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6956 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6957 rdmac_mode |= (1 << 27);
6959 /* Receive/send statistics. */
6960 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6961 val = tr32(RCVLPC_STATS_ENABLE);
6962 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6963 tw32(RCVLPC_STATS_ENABLE, val);
6964 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6965 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6966 val = tr32(RCVLPC_STATS_ENABLE);
6967 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6968 tw32(RCVLPC_STATS_ENABLE, val);
6970 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6972 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6973 tw32(SNDDATAI_STATSENAB, 0xffffff);
6974 tw32(SNDDATAI_STATSCTRL,
6975 (SNDDATAI_SCTRL_ENABLE |
6976 SNDDATAI_SCTRL_FASTUPD));
6978 /* Setup host coalescing engine. */
6979 tw32(HOSTCC_MODE, 0);
6980 for (i = 0; i < 2000; i++) {
6981 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6986 __tg3_set_coalesce(tp, &tp->coal);
6988 /* set status block DMA address */
6989 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6990 ((u64) tp->status_mapping >> 32));
6991 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6992 ((u64) tp->status_mapping & 0xffffffff));
6994 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6995 /* Status/statistics block address. See tg3_timer,
6996 * the tg3_periodic_fetch_stats call there, and
6997 * tg3_get_stats to see how this works for 5705/5750 chips.
6999 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7000 ((u64) tp->stats_mapping >> 32));
7001 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7002 ((u64) tp->stats_mapping & 0xffffffff));
7003 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7004 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7007 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7009 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7010 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7011 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7012 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7014 /* Clear statistics/status block in chip, and status block in ram. */
7015 for (i = NIC_SRAM_STATS_BLK;
7016 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7018 tg3_write_mem(tp, i, 0);
7021 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7023 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7024 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7025 /* reset to prevent losing 1st rx packet intermittently */
7026 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7030 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7031 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7032 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7033 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7034 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7035 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7036 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7039 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7040 * If TG3_FLG2_IS_NIC is zero, we should read the
7041 * register to preserve the GPIO settings for LOMs. The GPIOs,
7042 * whether used as inputs or outputs, are set by boot code after
7045 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7048 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7049 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7050 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7053 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7054 GRC_LCLCTRL_GPIO_OUTPUT3;
7056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7057 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7059 tp->grc_local_ctrl &= ~gpio_mask;
7060 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7062 /* GPIO1 must be driven high for eeprom write protect */
7063 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7064 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7065 GRC_LCLCTRL_GPIO_OUTPUT1);
7067 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7070 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7073 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7074 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7078 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7079 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7080 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7081 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7082 WDMAC_MODE_LNGREAD_ENAB);
7084 /* If statement applies to 5705 and 5750 PCI devices only */
7085 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7086 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
/* FIX: TG3_FLG2_TSO_CAPABLE is a tg3_flags2 bit; the original
 * tested it against tp->tg3_flags (compare line 6944 above). */
7088 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7089 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7090 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7092 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7093 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7094 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7095 val |= WDMAC_MODE_RX_ACCEL;
7099 /* Enable host coalescing bug fix */
7100 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7101 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7102 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7103 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7104 val |= WDMAC_MODE_STATUS_TAG_FIX;
7106 tw32_f(WDMAC_MODE, val);
/* PCI-X: raise the maximum memory-read byte count to 2K (5703/5704). */
7109 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7112 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7115 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7116 pcix_cmd |= PCI_X_CMD_READ_2K;
7117 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7118 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7119 pcix_cmd |= PCI_X_CMD_READ_2K;
7121 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7125 tw32_f(RDMAC_MODE, rdmac_mode);
/* Enable the remaining datapath state machines. */
7128 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7129 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7130 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7134 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7136 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7138 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7139 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7140 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7141 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7142 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7143 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7144 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7145 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
/* Load errata/TSO firmware where the silicon needs it. */
7147 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7148 err = tg3_load_5701_a0_firmware_fix(tp);
7153 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7154 err = tg3_load_tso_firmware(tp);
7159 tp->tx_mode = TX_MODE_ENABLE;
7160 tw32_f(MAC_TX_MODE, tp->tx_mode);
7163 tp->rx_mode = RX_MODE_ENABLE;
7164 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7165 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7166 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7168 tw32_f(MAC_RX_MODE, tp->rx_mode);
7171 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
7172 tw32_f(MAC_MI_MODE, tp->mi_mode);
7175 tw32(MAC_LED_CTRL, tp->led_ctrl);
7177 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7178 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7179 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7182 tw32_f(MAC_RX_MODE, tp->rx_mode);
7185 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7186 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7187 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7188 /* Set drive transmission level to 1.2V */
7189 /* only if the signal pre-emphasis bit is not set */
7190 val = tr32(MAC_SERDES_CFG);
7193 tw32(MAC_SERDES_CFG, val);
7195 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7196 tw32(MAC_SERDES_CFG, 0x616000);
7199 /* Prevent chip from dropping frames when flow control
7202 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7205 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7206 /* Use hardware link auto-negotiation */
7207 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7210 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7211 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7214 tmp = tr32(SERDES_RX_CTRL);
7215 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7216 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7217 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7218 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7221 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
/* Coming out of low-power: restore the pre-suspend link settings. */
7222 if (tp->link_config.phy_is_low_power) {
7223 tp->link_config.phy_is_low_power = 0;
7224 tp->link_config.speed = tp->link_config.orig_speed;
7225 tp->link_config.duplex = tp->link_config.orig_duplex;
7226 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7229 err = tg3_setup_phy(tp, 0);
7233 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7234 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7237 /* Clear CRC stats. */
7238 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7239 tg3_writephy(tp, MII_TG3_TEST1,
7240 tmp | MII_TG3_TEST1_CRC_EN);
7241 tg3_readphy(tp, 0x14, &tmp);
7246 __tg3_set_rx_mode(tp->dev);
7248 /* Initialize receive rules. */
7249 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7250 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7251 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7252 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7254 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7255 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7259 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
/* Clear the unused receive rules (count depends on chip/ASF above). */
7263 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7265 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7267 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7269 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7271 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7273 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7275 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7277 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7279 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7281 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7283 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7285 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7287 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7289 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7297 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7298 /* Write our heartbeat update interval to APE. */
7299 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7300 APE_HOST_HEARTBEAT_INT_DISABLE);
7302 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7307 /* Called at device open time to get the chip ready for
7308 * packet processing. Invoked with tp->lock held.
7310 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7314 /* Force the chip into D0. */
7315 err = tg3_set_power_state(tp, PCI_D0);
/* Select a working core clock before touching chip memory. */
7319 tg3_switch_clocks(tp);
/* Point the PCI memory window at the start of NIC SRAM. */
7321 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Full reset + re-init; @reset_phy is forwarded to the PHY setup. */
7323 err = tg3_reset_hw(tp, reset_phy);
/*
 * TG3_STAT_ADD32 - accumulate the 32-bit hardware counter at REG into a
 * 64-bit statistic kept as a {high, low} pair.  Wrap of the low word is
 * detected by the sum becoming smaller than the value just added, in
 * which case a carry is propagated into .high.  (The closing
 * "} while (0)" lies outside this extract.)
 */
7329 #define TG3_STAT_ADD32(PSTAT, REG) \
7330 do { u32 __val = tr32(REG); \
7331 (PSTAT)->low += __val; \
7332 if ((PSTAT)->low < __val) \
7333 (PSTAT)->high += 1; \
/*
 * tg3_periodic_fetch_stats() - fold the chip's 32-bit MAC TX/RX and
 * receive-list-placement counters into the 64-bit software statistics
 * block (tp->hw_stats) via TG3_STAT_ADD32.  Called from tg3_timer on
 * chips that do not DMA a statistics block to host memory (see the
 * 5705_PLUS gate at the call site).  Skips the work entirely while the
 * link carrier is down.
 */
7336 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7338 struct tg3_hw_stats *sp = tp->hw_stats;
7340 if (!netif_carrier_ok(tp->dev))
/* Transmit-side MAC counters. */
7343 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7344 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7345 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7346 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7347 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7348 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7349 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7350 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7351 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7352 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7353 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7354 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7355 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* Receive-side MAC counters. */
7357 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7358 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7359 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7360 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7361 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7362 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7363 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7364 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7365 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7366 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7367 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7368 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7369 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7370 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
/* Receive list placement counters (drops and errors). */
7372 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7373 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7374 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/*
 * tg3_timer() - periodic driver timer.  Under tp->lock it (1) kicks the
 * interrupt path on non-tagged-status chips if a status update was
 * missed, (2) schedules a full reset via tp->reset_task if the write
 * DMA engine has died, (3) once per second polls link state / fetches
 * stats / runs serdes parallel detection, and (4) every two seconds
 * sends the ASF firmware a NICDRV_ALIVE heartbeat.  Re-arms itself at
 * the end.
 * NOTE(review): this extract elides interior lines; comments describe
 * only what the visible lines establish.
 */
7377 static void tg3_timer(unsigned long __opaque)
7379 struct tg3 *tp = (struct tg3 *) __opaque;
7384 spin_lock(&tp->lock);
7386 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7387 /* All of this garbage is because when using non-tagged
7388 * IRQ status the mailbox/status_block protocol the chip
7389 * uses with the cpu is race prone.
7391 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7392 tw32(GRC_LOCAL_CTRL,
7393 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7395 tw32(HOSTCC_MODE, tp->coalesce_mode |
7396 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Write DMA engine dropped its enable bit: chip is wedged; hand off
 * to the reset work item (we cannot reset from timer context). */
7399 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7400 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7401 spin_unlock(&tp->lock);
7402 schedule_work(&tp->reset_task);
7407 /* This part only runs once per second. */
7408 if (!--tp->timer_counter) {
7409 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7410 tg3_periodic_fetch_stats(tp);
7412 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7416 mac_stat = tr32(MAC_STATUS);
7419 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7420 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7422 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7426 tg3_setup_phy(tp, 0);
7427 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7428 u32 mac_stat = tr32(MAC_STATUS);
/* Re-run PHY setup on a link-state transition in either direction. */
7431 if (netif_carrier_ok(tp->dev) &&
7432 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7435 if (! netif_carrier_ok(tp->dev) &&
7436 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7437 MAC_STATUS_SIGNAL_DET))) {
7441 if (!tp->serdes_counter) {
7444 ~MAC_MODE_PORT_MODE_MASK));
7446 tw32_f(MAC_MODE, tp->mac_mode);
7449 tg3_setup_phy(tp, 0);
7451 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7452 tg3_serdes_parallel_detect(tp);
7454 tp->timer_counter = tp->timer_multiplier;
7457 /* Heartbeat is only sent once every 2 seconds.
7459 * The heartbeat is to tell the ASF firmware that the host
7460 * driver is still alive. In the event that the OS crashes,
7461 * ASF needs to reset the hardware to free up the FIFO space
7462 * that may be filled with rx packets destined for the host.
7463 * If the FIFO is full, ASF will no longer function properly.
7465 * Unintended resets have been reported on real time kernels
7466 * where the timer doesn't run on time. Netpoll will also have
7469 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7470 * to check the ring condition when the heartbeat is expiring
7471 * before doing the reset. This will prevent most unintended
7474 if (!--tp->asf_counter) {
7475 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7478 tg3_wait_for_event_ack(tp);
7480 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7481 FWCMD_NICDRV_ALIVE3);
7482 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7483 /* 5 seconds timeout */
7484 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
/* Ring the RX CPU doorbell so firmware notices the mailbox. */
7485 val = tr32(GRC_RX_CPU_EVENT);
7486 val |= GRC_RX_CPU_DRIVER_EVENT;
7487 tw32_f(GRC_RX_CPU_EVENT, val);
7489 tp->asf_counter = tp->asf_multiplier;
7492 spin_unlock(&tp->lock);
/* Re-arm for the next tick. */
7495 tp->timer.expires = jiffies + tp->timer_offset;
7496 add_timer(&tp->timer);
/*
 * tg3_request_irq() - pick the IRQ handler and flags that match the
 * current interrupt scheme (MSI vs. legacy, tagged vs. non-tagged
 * status) and register it for tp->pdev->irq.  Returns request_irq()'s
 * result.
 * NOTE(review): the handler-assignment lines for the MSI and default
 * cases are elided from this extract; only the tagged-status and flag
 * choices are visible below.
 */
7499 static int tg3_request_irq(struct tg3 *tp)
7502 unsigned long flags;
7503 struct net_device *dev = tp->dev;
/* MSI is never shared, so IRQF_SHARED is not needed here. */
7505 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7507 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7509 flags = IRQF_SAMPLE_RANDOM;
7512 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7513 fn = tg3_interrupt_tagged;
7514 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7516 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
/*
 * tg3_test_interrupt() - verify that the device can actually deliver an
 * interrupt: temporarily install tg3_test_isr, force a coalescing-now
 * event, and poll (5 iterations) for either a non-zero interrupt
 * mailbox or the PCI-INT-masked bit in MISC_HOST_CTRL.  The normal
 * handler is re-installed via tg3_request_irq() before returning.
 * Used by the MSI sanity check in tg3_test_msi().
 */
7519 static int tg3_test_interrupt(struct tg3 *tp)
7521 struct net_device *dev = tp->dev;
7522 int err, i, intr_ok = 0;
7524 if (!netif_running(dev))
7527 tg3_disable_ints(tp);
/* Swap in the minimal test ISR. */
7529 free_irq(tp->pdev->irq, dev);
7531 err = request_irq(tp->pdev->irq, tg3_test_isr,
7532 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7536 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7537 tg3_enable_ints(tp);
/* Force the coalescing engine to fire an interrupt right now. */
7539 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7542 for (i = 0; i < 5; i++) {
7543 u32 int_mbox, misc_host_ctrl;
7545 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7547 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
/* Either signal proves the interrupt reached the host. */
7549 if ((int_mbox != 0) ||
7550 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7558 tg3_disable_ints(tp);
/* Restore the normal interrupt handler. */
7560 free_irq(tp->pdev->irq, dev);
7562 err = tg3_request_irq(tp);
7573 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7574 * successfully restored
7576 static int tg3_test_msi(struct tg3 *tp)
7578 struct net_device *dev = tp->dev;
/* Nothing to test unless MSI is actually in use. */
7582 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7585 /* Turn off SERR reporting in case MSI terminates with Master
7588 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7589 pci_write_config_word(tp->pdev, PCI_COMMAND,
7590 pci_cmd & ~PCI_COMMAND_SERR);
7592 err = tg3_test_interrupt(tp);
/* Restore the original PCI_COMMAND (with SERR) regardless of result. */
7594 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7599 /* other failures */
7603 /* MSI test failed, go back to INTx mode */
7604 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7605 "switching to INTx mode. Please report this failure to "
7606 "the PCI maintainer and include system chipset information.\n",
/* Tear down MSI and fall back to a legacy (INTx) interrupt. */
7609 free_irq(tp->pdev->irq, dev);
7610 pci_disable_msi(tp->pdev);
7612 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7614 err = tg3_request_irq(tp);
7618 /* Need to reset the chip because the MSI cycle may have terminated
7619 * with Master Abort.
7621 tg3_full_lock(tp, 1);
7623 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7624 err = tg3_init_hw(tp, 1);
7626 tg3_full_unlock(tp);
/* NOTE(review): elided lines presumably free the INTx irq only on the
 * init_hw error path -- confirm against the full source. */
7629 free_irq(tp->pdev->irq, dev);
/*
 * tg3_open() - net_device open callback.  Powers the chip to D0,
 * allocates DMA-consistent rings/buffers, optionally enables MSI,
 * requests the IRQ, initializes the hardware, starts the periodic
 * timer, runs the MSI delivery self-test, then enables interrupts and
 * the transmit queue.  Each failure point unwinds everything acquired
 * before it.
 * NOTE(review): error-label and return lines are elided from this
 * extract; comments describe only what the visible lines establish.
 */
7634 static int tg3_open(struct net_device *dev)
7636 struct tg3 *tp = netdev_priv(dev);
7639 netif_carrier_off(tp->dev);
7641 tg3_full_lock(tp, 0);
7643 err = tg3_set_power_state(tp, PCI_D0);
7645 tg3_full_unlock(tp);
7649 tg3_disable_ints(tp);
7650 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7652 tg3_full_unlock(tp);
7654 /* The placement of this call is tied
7655 * to the setup and use of Host TX descriptors.
7657 err = tg3_alloc_consistent(tp);
7661 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7662 /* All MSI supporting chips should support tagged
7663 * status. Assert that this is the case.
7665 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7666 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7667 "Not using MSI.\n", tp->dev->name);
7668 } else if (pci_enable_msi(tp->pdev) == 0) {
7671 msi_mode = tr32(MSGINT_MODE);
7672 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7673 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7676 err = tg3_request_irq(tp);
/* IRQ request failed: undo MSI and the consistent allocations. */
7679 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7680 pci_disable_msi(tp->pdev);
7681 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7683 tg3_free_consistent(tp);
7687 napi_enable(&tp->napi);
7689 tg3_full_lock(tp, 0);
7691 err = tg3_init_hw(tp, 1);
7693 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Timer cadence: 1s ticks with tagged status, 100ms otherwise; the
 * ASF heartbeat counter runs at half the tick rate (every 2s). */
7696 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7697 tp->timer_offset = HZ;
7699 tp->timer_offset = HZ / 10;
7701 BUG_ON(tp->timer_offset > HZ);
7702 tp->timer_counter = tp->timer_multiplier =
7703 (HZ / tp->timer_offset);
7704 tp->asf_counter = tp->asf_multiplier =
7705 ((HZ / tp->timer_offset) * 2);
7707 init_timer(&tp->timer);
7708 tp->timer.expires = jiffies + tp->timer_offset;
7709 tp->timer.data = (unsigned long) tp;
7710 tp->timer.function = tg3_timer;
7713 tg3_full_unlock(tp);
/* init_hw failure path: stop NAPI, release IRQ/MSI and DMA memory. */
7716 napi_disable(&tp->napi);
7717 free_irq(tp->pdev->irq, dev);
7718 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7719 pci_disable_msi(tp->pdev);
7720 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7722 tg3_free_consistent(tp);
/* Verify MSI actually delivers an interrupt; fall back happens inside
 * tg3_test_msi(), so only a hard failure unwinds here. */
7726 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7727 err = tg3_test_msi(tp);
7730 tg3_full_lock(tp, 0);
7732 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7733 pci_disable_msi(tp->pdev);
7734 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7736 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7738 tg3_free_consistent(tp);
7740 tg3_full_unlock(tp);
7742 napi_disable(&tp->napi);
7747 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7748 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7749 u32 val = tr32(PCIE_TRANSACTION_CFG);
7751 tw32(PCIE_TRANSACTION_CFG,
7752 val | PCIE_TRANS_CFG_1SHOT_MSI);
/* Success: start the timer, mark init complete, unmask interrupts. */
7757 tg3_full_lock(tp, 0);
7759 add_timer(&tp->timer);
7760 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7761 tg3_enable_ints(tp);
7763 tg3_full_unlock(tp);
7765 netif_start_queue(dev);
/* tg3_dump_state() - emergency debug dump of chip and driver state.
 *
 * Walks every major hardware block (MAC, send/receive control blocks,
 * host coalescing, memory arbiter, buffer manager, DMA engines, GRC),
 * printk()ing each block's MODE/STATUS registers, then dumps the
 * NIC-SRAM ring control blocks, the host status/statistics blocks and
 * the first few NIC-side TX/RX descriptors read through the SRAM
 * window.  Purely diagnostic; no state is modified except the
 * MEM_WIN-mediated reads done by tg3_read_mem().
 *
 * NOTE(review): this file is a damaged extraction - stray line numbers
 * are fused into every line and some lines (declarations, braces) are
 * missing.  Code text is left byte-identical here; only comments added.
 */
7771 /*static*/ void tg3_dump_state(struct tg3 *tp)
7773 u32 val32, val32_2, val32_3, val32_4, val32_5;
7777 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7778 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7779 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
/* MAC block */
7783 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7784 tr32(MAC_MODE), tr32(MAC_STATUS));
7785 printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7786 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7787 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7788 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7789 printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7790 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7792 /* Send data initiator control block */
7793 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7794 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7795 printk("       SNDDATAI_STATSCTRL[%08x]\n",
7796 tr32(SNDDATAI_STATSCTRL));
7798 /* Send data completion control block */
7799 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7801 /* Send BD ring selector block */
7802 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7803 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7805 /* Send BD initiator control block */
7806 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7807 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7809 /* Send BD completion control block */
7810 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7812 /* Receive list placement control block */
7813 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7814 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7815 printk("       RCVLPC_STATSCTRL[%08x]\n",
7816 tr32(RCVLPC_STATSCTRL));
7818 /* Receive data and receive BD initiator control block */
7819 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7820 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7822 /* Receive data completion control block */
7823 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7826 /* Receive BD initiator control block */
7827 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7828 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7830 /* Receive BD completion control block */
7831 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7832 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7834 /* Receive list selector control block */
7835 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7836 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7838 /* Mbuf cluster free block */
7839 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7840 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7842 /* Host coalescing control block */
7843 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7844 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7845 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7846 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7847 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7848 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7849 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7850 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7851 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7852 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7853 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7854 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7856 /* Memory arbiter control block */
7857 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7858 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7860 /* Buffer manager control block */
7861 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7862 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7863 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7864 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7865 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7866 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7867 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7868 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7870 /* Read DMA control block */
7871 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7872 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7874 /* Write DMA control block */
7875 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7876 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7878 /* DMA completion block */
7879 printk("DEBUG: DMAC_MODE[%08x]\n",
/* GRC block */
7883 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7884 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7885 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7886 tr32(GRC_LOCAL_CTRL));
/* On-chip ring control blocks for jumbo/standard/mini RX BD rings. */
7889 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7890 tr32(RCVDBDI_JUMBO_BD + 0x0),
7891 tr32(RCVDBDI_JUMBO_BD + 0x4),
7892 tr32(RCVDBDI_JUMBO_BD + 0x8),
7893 tr32(RCVDBDI_JUMBO_BD + 0xc));
7894 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7895 tr32(RCVDBDI_STD_BD + 0x0),
7896 tr32(RCVDBDI_STD_BD + 0x4),
7897 tr32(RCVDBDI_STD_BD + 0x8),
7898 tr32(RCVDBDI_STD_BD + 0xc));
7899 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7900 tr32(RCVDBDI_MINI_BD + 0x0),
7901 tr32(RCVDBDI_MINI_BD + 0x4),
7902 tr32(RCVDBDI_MINI_BD + 0x8),
7903 tr32(RCVDBDI_MINI_BD + 0xc));
/* NIC-SRAM copies of the send, receive-return and status blocks. */
7905 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7906 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7907 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7908 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7909 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7910 val32, val32_2, val32_3, val32_4);
7912 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7913 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7914 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7915 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7916 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7917 val32, val32_2, val32_3, val32_4);
7919 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7920 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7921 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7922 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7923 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7924 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7925 val32, val32_2, val32_3, val32_4, val32_5);
7927 /* SW status block */
7928 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7929 tp->hw_status->status,
7930 tp->hw_status->status_tag,
7931 tp->hw_status->rx_jumbo_consumer,
7932 tp->hw_status->rx_consumer,
7933 tp->hw_status->rx_mini_consumer,
7934 tp->hw_status->idx[0].rx_producer,
7935 tp->hw_status->idx[0].tx_consumer);
7937 /* SW statistics block */
7938 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7939 ((u32 *)tp->hw_stats)[0],
7940 ((u32 *)tp->hw_stats)[1],
7941 ((u32 *)tp->hw_stats)[2],
7942 ((u32 *)tp->hw_stats)[3]);
/* Mailbox producer indices (host-side and NIC-side send rings). */
7945 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7946 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7947 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7948 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7949 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7951 /* NIC side send descriptors. */
7952 for (i = 0; i < 6; i++) {
7955 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7956 + (i * sizeof(struct tg3_tx_buffer_desc));
7957 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7959 readl(txd + 0x0), readl(txd + 0x4),
7960 readl(txd + 0x8), readl(txd + 0xc));
7963 /* NIC side RX descriptors. */
7964 for (i = 0; i < 6; i++) {
7967 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7968 + (i * sizeof(struct tg3_rx_buffer_desc));
7969 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7971 readl(rxd + 0x0), readl(rxd + 0x4),
7972 readl(rxd + 0x8), readl(rxd + 0xc));
/* Each RX BD is 8 words; dump the second half separately. */
7973 rxd += (4 * sizeof(u32));
7974 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7976 readl(rxd + 0x0), readl(rxd + 0x4),
7977 readl(rxd + 0x8), readl(rxd + 0xc));
7980 for (i = 0; i < 6; i++) {
7983 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7984 + (i * sizeof(struct tg3_rx_buffer_desc));
7985 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7987 readl(rxd + 0x0), readl(rxd + 0x4),
7988 readl(rxd + 0x8), readl(rxd + 0xc));
7989 rxd += (4 * sizeof(u32));
7990 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7992 readl(rxd + 0x0), readl(rxd + 0x4),
7993 readl(rxd + 0x8), readl(rxd + 0xc));
7998 static struct net_device_stats *tg3_get_stats(struct net_device *);
7999 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* tg3_close() - net_device stop (ifconfig down) handler.
 *
 * Quiesces NAPI and the deferred reset worker, stops the TX queue and
 * the driver timer, halts the chip under the full lock, releases the
 * IRQ (disabling MSI if it was in use), snapshots the final hardware
 * statistics into the *_prev copies (the hw_stats DMA memory is about
 * to be freed), frees the DMA rings and drops the device into D3hot.
 *
 * NOTE(review): damaged extraction - stray line numbers fused into
 * every line, some lines (braces/returns) missing; code left as-is.
 */
8001 static int tg3_close(struct net_device *dev)
8003 struct tg3 *tp = netdev_priv(dev);
8005 napi_disable(&tp->napi);
8006 cancel_work_sync(&tp->reset_task);
8008 netif_stop_queue(dev);
8010 del_timer_sync(&tp->timer);
8012 tg3_full_lock(tp, 1);
8017 tg3_disable_ints(tp);
8019 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8021 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8023 tg3_full_unlock(tp);
/* Release the interrupt line before tearing down ring memory. */
8025 free_irq(tp->pdev->irq, dev);
8026 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8027 pci_disable_msi(tp->pdev);
8028 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve cumulative counters across the down/up cycle. */
8031 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8032 sizeof(tp->net_stats_prev));
8033 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8034 sizeof(tp->estats_prev));
8036 tg3_free_consistent(tp);
8038 tg3_set_power_state(tp, PCI_D3hot);
8040 netif_carrier_off(tp->dev);
/* get_stat64() - fold a 64-bit hardware statistics counter into an
 * unsigned long.  On 32-bit kernels the high and low halves are
 * combined explicitly; presumably the (elided) 64-bit branch returns
 * val->low directly - TODO confirm against the full source.
 */
8045 static inline unsigned long get_stat64(tg3_stat64_t *val)
8049 #if (BITS_PER_LONG == 32)
8052 ret = ((u64)val->high << 32) | ((u64)val->low);
/* calc_crc_errors() - return the cumulative RX CRC error count.
 *
 * On copper 5700/5701 the MAC's FCS counter is unreliable, so the
 * PHY's own CRC counter (latched via MII_TG3_TEST1/reg 0x14) is read
 * under tp->lock and accumulated into tp->phy_crc_errors instead.
 * All other chips use the hardware rx_fcs_errors statistic.
 */
8057 static unsigned long calc_crc_errors(struct tg3 *tp)
8059 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8061 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8062 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8066 spin_lock_bh(&tp->lock);
8067 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
/* Enable CRC counting, then read the (self-clearing) counter. */
8068 tg3_writephy(tp, MII_TG3_TEST1,
8069 val | MII_TG3_TEST1_CRC_EN);
8070 tg3_readphy(tp, 0x14, &val);
8073 spin_unlock_bh(&tp->lock);
8075 tp->phy_crc_errors += val;
8077 return tp->phy_crc_errors;
8080 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD() - helper for tg3_get_estats(): the exported value of a
 * counter is the value saved at the last close (estats_prev) plus the
 * live hardware counter, so statistics survive down/up cycles.
 */
8083 #define ESTAT_ADD(member) \
8084 estats->member = old_estats->member + \
8085 get_stat64(&hw_stats->member)
/* tg3_get_estats() - refresh and return the cached ethtool statistics
 * block (tp->estats) by summing every hardware counter with its
 * pre-close snapshot via ESTAT_ADD().
 */
8087 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8089 struct tg3_ethtool_stats *estats = &tp->estats;
8090 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8091 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8096 ESTAT_ADD(rx_octets);
8097 ESTAT_ADD(rx_fragments);
8098 ESTAT_ADD(rx_ucast_packets);
8099 ESTAT_ADD(rx_mcast_packets);
8100 ESTAT_ADD(rx_bcast_packets);
8101 ESTAT_ADD(rx_fcs_errors);
8102 ESTAT_ADD(rx_align_errors);
8103 ESTAT_ADD(rx_xon_pause_rcvd);
8104 ESTAT_ADD(rx_xoff_pause_rcvd);
8105 ESTAT_ADD(rx_mac_ctrl_rcvd);
8106 ESTAT_ADD(rx_xoff_entered);
8107 ESTAT_ADD(rx_frame_too_long_errors);
8108 ESTAT_ADD(rx_jabbers);
8109 ESTAT_ADD(rx_undersize_packets);
8110 ESTAT_ADD(rx_in_length_errors);
8111 ESTAT_ADD(rx_out_length_errors);
8112 ESTAT_ADD(rx_64_or_less_octet_packets);
8113 ESTAT_ADD(rx_65_to_127_octet_packets);
8114 ESTAT_ADD(rx_128_to_255_octet_packets);
8115 ESTAT_ADD(rx_256_to_511_octet_packets);
8116 ESTAT_ADD(rx_512_to_1023_octet_packets);
8117 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8118 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8119 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8120 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8121 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8123 ESTAT_ADD(tx_octets);
8124 ESTAT_ADD(tx_collisions);
8125 ESTAT_ADD(tx_xon_sent);
8126 ESTAT_ADD(tx_xoff_sent);
8127 ESTAT_ADD(tx_flow_control);
8128 ESTAT_ADD(tx_mac_errors);
8129 ESTAT_ADD(tx_single_collisions);
8130 ESTAT_ADD(tx_mult_collisions);
8131 ESTAT_ADD(tx_deferred);
8132 ESTAT_ADD(tx_excessive_collisions);
8133 ESTAT_ADD(tx_late_collisions);
8134 ESTAT_ADD(tx_collide_2times);
8135 ESTAT_ADD(tx_collide_3times);
8136 ESTAT_ADD(tx_collide_4times);
8137 ESTAT_ADD(tx_collide_5times);
8138 ESTAT_ADD(tx_collide_6times);
8139 ESTAT_ADD(tx_collide_7times);
8140 ESTAT_ADD(tx_collide_8times);
8141 ESTAT_ADD(tx_collide_9times);
8142 ESTAT_ADD(tx_collide_10times);
8143 ESTAT_ADD(tx_collide_11times);
8144 ESTAT_ADD(tx_collide_12times);
8145 ESTAT_ADD(tx_collide_13times);
8146 ESTAT_ADD(tx_collide_14times);
8147 ESTAT_ADD(tx_collide_15times);
8148 ESTAT_ADD(tx_ucast_packets);
8149 ESTAT_ADD(tx_mcast_packets);
8150 ESTAT_ADD(tx_bcast_packets);
8151 ESTAT_ADD(tx_carrier_sense_errors);
8152 ESTAT_ADD(tx_discards);
8153 ESTAT_ADD(tx_errors);
8155 ESTAT_ADD(dma_writeq_full);
8156 ESTAT_ADD(dma_write_prioq_full);
8157 ESTAT_ADD(rxbds_empty);
8158 ESTAT_ADD(rx_discards);
8159 ESTAT_ADD(rx_errors);
8160 ESTAT_ADD(rx_threshold_hit);
8162 ESTAT_ADD(dma_readq_full);
8163 ESTAT_ADD(dma_read_prioq_full);
8164 ESTAT_ADD(tx_comp_queue_full);
8166 ESTAT_ADD(ring_set_send_prod_index);
8167 ESTAT_ADD(ring_status_update);
8168 ESTAT_ADD(nic_irqs);
8169 ESTAT_ADD(nic_avoided_irqs);
8170 ESTAT_ADD(nic_tx_threshold_hit);
/* tg3_get_stats() - net_device get_stats handler.
 *
 * Builds tp->net_stats from the live hardware statistics block plus
 * the snapshot taken at last close (net_stats_prev), mapping the
 * chip's detailed counters onto the generic net_device_stats fields.
 * CRC errors go through calc_crc_errors() to handle 5700/5701 copper
 * quirks.
 */
8177 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8179 struct tg3 *tp = netdev_priv(dev);
8180 struct net_device_stats *stats = &tp->net_stats;
8181 struct net_device_stats *old_stats = &tp->net_stats_prev;
8182 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8185 stats->rx_packets = old_stats->rx_packets +
8186 get_stat64(&hw_stats->rx_ucast_packets) +
8187 get_stat64(&hw_stats->rx_mcast_packets) +
8188 get_stat64(&hw_stats->rx_bcast_packets);
8190 stats->tx_packets = old_stats->tx_packets +
8191 get_stat64(&hw_stats->tx_ucast_packets) +
8192 get_stat64(&hw_stats->tx_mcast_packets) +
8193 get_stat64(&hw_stats->tx_bcast_packets);
8195 stats->rx_bytes = old_stats->rx_bytes +
8196 get_stat64(&hw_stats->rx_octets);
8197 stats->tx_bytes = old_stats->tx_bytes +
8198 get_stat64(&hw_stats->tx_octets);
8200 stats->rx_errors = old_stats->rx_errors +
8201 get_stat64(&hw_stats->rx_errors);
8202 stats->tx_errors = old_stats->tx_errors +
8203 get_stat64(&hw_stats->tx_errors) +
8204 get_stat64(&hw_stats->tx_mac_errors) +
8205 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8206 get_stat64(&hw_stats->tx_discards);
8208 stats->multicast = old_stats->multicast +
8209 get_stat64(&hw_stats->rx_mcast_packets);
8210 stats->collisions = old_stats->collisions +
8211 get_stat64(&hw_stats->tx_collisions);
8213 stats->rx_length_errors = old_stats->rx_length_errors +
8214 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8215 get_stat64(&hw_stats->rx_undersize_packets);
8217 stats->rx_over_errors = old_stats->rx_over_errors +
8218 get_stat64(&hw_stats->rxbds_empty);
8219 stats->rx_frame_errors = old_stats->rx_frame_errors +
8220 get_stat64(&hw_stats->rx_align_errors);
8221 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8222 get_stat64(&hw_stats->tx_discards);
8223 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8224 get_stat64(&hw_stats->tx_carrier_sense_errors);
8226 stats->rx_crc_errors = old_stats->rx_crc_errors +
8227 calc_crc_errors(tp);
8229 stats->rx_missed_errors = old_stats->rx_missed_errors +
8230 get_stat64(&hw_stats->rx_discards);
/* calc_crc() - compute a CRC over buf[0..len-1], bit by bit (inner
 * loop iterates over the 8 bits of each byte).  Used for the multicast
 * hash filter and NVRAM checksums.  NOTE(review): the polynomial and
 * accumulator lines are missing from this extraction - presumably the
 * standard CRC-32 (0xedb88320) used by this driver; confirm against
 * the full source.
 */
8235 static inline u32 calc_crc(unsigned char *buf, int len)
8243 for (j = 0; j < len; j++) {
8246 for (k = 0; k < 8; k++) {
8260 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8262 /* accept or reject all multicast frames */
8263 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8264 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8265 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8266 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* __tg3_set_rx_mode() - program MAC_RX_MODE and the multicast hash
 * filter from dev->flags and the device multicast list.  Caller must
 * hold the full lock (see tg3_set_rx_mode()).
 *
 * VLAN tag stripping is kept off (RX_MODE_KEEP_VLAN_TAG) whenever ASF
 * firmware is active, since the firmware needs to see the tags.
 */
8269 static void __tg3_set_rx_mode(struct net_device *dev)
8271 struct tg3 *tp = netdev_priv(dev);
8274 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8275 RX_MODE_KEEP_VLAN_TAG);
8277 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8280 #if TG3_VLAN_TAG_USED
8282 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8283 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8285 /* By definition, VLAN is disabled always in this
8288 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8289 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8292 if (dev->flags & IFF_PROMISC) {
8293 /* Promiscuous mode. */
8294 rx_mode |= RX_MODE_PROMISC;
8295 } else if (dev->flags & IFF_ALLMULTI) {
8296 /* Accept all multicast. */
8297 tg3_set_multi (tp, 1);
8298 } else if (dev->mc_count < 1) {
8299 /* Reject all multicast. */
8300 tg3_set_multi (tp, 0);
8302 /* Accept one or more multicast(s). */
8303 struct dev_mc_list *mclist;
8305 u32 mc_filter[4] = { 0, };
/* Hash each address' CRC into a 128-bit filter: bits 6:5 of the
 * CRC select the register, the low 5 bits select the bit. */
8310 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8311 i++, mclist = mclist->next) {
8313 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8315 regidx = (bit & 0x60) >> 5;
8317 mc_filter[regidx] |= (1 << bit);
8320 tw32(MAC_HASH_REG_0, mc_filter[0]);
8321 tw32(MAC_HASH_REG_1, mc_filter[1]);
8322 tw32(MAC_HASH_REG_2, mc_filter[2]);
8323 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the (flushed) RX_MODE register when it changed. */
8326 if (rx_mode != tp->rx_mode) {
8327 tp->rx_mode = rx_mode;
8328 tw32_f(MAC_RX_MODE, rx_mode);
/* tg3_set_rx_mode() - net_device set_multicast_list entry point.
 * Takes the full lock around __tg3_set_rx_mode(); a stopped interface
 * is left alone (hardware will be reprogrammed on open).
 */
8333 static void tg3_set_rx_mode(struct net_device *dev)
8335 struct tg3 *tp = netdev_priv(dev);
8337 if (!netif_running(dev))
8340 tg3_full_lock(tp, 0);
8341 __tg3_set_rx_mode(dev);
8342 tg3_full_unlock(tp);
/* Size of the register snapshot returned by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: the dump size is fixed. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
/* tg3_get_regs() - ethtool get_regs handler.
 *
 * Fills the 32KB dump buffer with register contents, placed at their
 * native offsets (the buffer is pre-zeroed, so unread gaps read as 0).
 * Skipped entirely when the PHY is powered down.  Taken under the
 * full lock so the snapshot is consistent.
 */
8352 static void tg3_get_regs(struct net_device *dev,
8353 struct ethtool_regs *regs, void *_p)
8356 struct tg3 *tp = netdev_priv(dev);
8362 memset(p, 0, TG3_REGDUMP_LEN);
8364 if (tp->link_config.phy_is_low_power)
8367 tg3_full_lock(tp, 0);
/* Helpers: copy a run of registers (or a single one) into the
 * buffer at the same offset the register has in BAR space. */
8369 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8370 #define GET_REG32_LOOP(base,len) \
8371 do { p = (u32 *)(orig_p + (base)); \
8372 for (i = 0; i < len; i += 4) \
8373 __GET_REG32((base) + i); \
8375 #define GET_REG32_1(reg) \
8376 do { p = (u32 *)(orig_p + (reg)); \
8377 __GET_REG32((reg)); \
8380 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8381 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8382 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8383 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8384 GET_REG32_1(SNDDATAC_MODE);
8385 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8386 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8387 GET_REG32_1(SNDBDC_MODE);
8388 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8389 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8390 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8391 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8392 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8393 GET_REG32_1(RCVDCC_MODE);
8394 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8395 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8396 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8397 GET_REG32_1(MBFREE_MODE);
8398 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8399 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8400 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8401 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8402 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8403 GET_REG32_1(RX_CPU_MODE);
8404 GET_REG32_1(RX_CPU_STATE);
8405 GET_REG32_1(RX_CPU_PGMCTR);
8406 GET_REG32_1(RX_CPU_HWBKPT);
8407 GET_REG32_1(TX_CPU_MODE);
8408 GET_REG32_1(TX_CPU_STATE);
8409 GET_REG32_1(TX_CPU_PGMCTR);
8410 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8411 GET_REG32_LOOP(FTQ_RESET, 0x120);
8412 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8413 GET_REG32_1(DMAC_MODE);
8414 GET_REG32_LOOP(GRC_MODE, 0x4c);
8415 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8416 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8419 #undef GET_REG32_LOOP
8422 tg3_full_unlock(tp);
8425 static int tg3_get_eeprom_len(struct net_device *dev)
8427 struct tg3 *tp = netdev_priv(dev);
8429 return tp->nvram_size;
8432 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8433 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8434 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
/* tg3_get_eeprom() - ethtool get_eeprom handler.
 *
 * Reads eeprom->len bytes at eeprom->offset from NVRAM into data.
 * NVRAM access is 4-byte aligned, so the read is split into an
 * unaligned head, a run of whole words, and an unaligned tail, each
 * copied out of a little-endian temporary.  Refused while the PHY is
 * in low-power state.
 */
8436 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8438 struct tg3 *tp = netdev_priv(dev);
8441 u32 i, offset, len, b_offset, b_count;
8444 if (tp->link_config.phy_is_low_power)
8447 offset = eeprom->offset;
8451 eeprom->magic = TG3_EEPROM_MAGIC;
8454 /* adjustments to start on required 4 byte boundary */
8455 b_offset = offset & 3;
8456 b_count = 4 - b_offset;
8457 if (b_count > len) {
8458 /* i.e. offset=1 len=2 */
8461 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8464 memcpy(data, ((char*)&val) + b_offset, b_count);
8467 eeprom->len += b_count;
8470 /* read bytes upto the last 4 byte boundary */
8471 pd = &data[eeprom->len];
8472 for (i = 0; i < (len - (len & 3)); i += 4) {
8473 ret = tg3_nvram_read_le(tp, offset + i, &val);
8478 memcpy(pd + i, &val, 4);
8483 /* read last bytes not ending on 4 byte boundary */
8484 pd = &data[eeprom->len];
8486 b_offset = offset + len - b_count;
8487 ret = tg3_nvram_read_le(tp, b_offset, &val);
8490 memcpy(pd, &val, b_count);
8491 eeprom->len += b_count;
8496 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* tg3_set_eeprom() - ethtool set_eeprom handler.
 *
 * Writes eeprom->len bytes at eeprom->offset.  Because NVRAM writes
 * are word-based, a write that is unaligned at either end first reads
 * the bordering words (start/end), builds a word-aligned bounce
 * buffer around the user data, and writes that.  The caller must
 * supply the correct magic; refused in low-power state.
 */
8498 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8500 struct tg3 *tp = netdev_priv(dev);
8502 u32 offset, len, b_offset, odd_len;
8506 if (tp->link_config.phy_is_low_power)
8509 if (eeprom->magic != TG3_EEPROM_MAGIC)
8512 offset = eeprom->offset;
8515 if ((b_offset = (offset & 3))) {
8516 /* adjustments to start on required 4 byte boundary */
8517 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8528 /* adjustments to end on required 4 byte boundary */
8530 len = (len + 3) & ~3;
8531 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
/* Bounce buffer: preserved border words + caller's payload. */
8537 if (b_offset || odd_len) {
8538 buf = kmalloc(len, GFP_KERNEL);
8542 memcpy(buf, &start, 4);
8544 memcpy(buf+len-4, &end, 4);
8545 memcpy(buf + b_offset, data, eeprom->len);
8548 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* tg3_get_settings() - ethtool get_settings handler.
 *
 * Reports supported/advertised modes from the chip's capabilities
 * (10/100-only flag, serdes vs copper), and - when the interface is
 * running - the currently negotiated speed/duplex from link_config.
 */
8556 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8558 struct tg3 *tp = netdev_priv(dev);
8560 cmd->supported = (SUPPORTED_Autoneg);
8562 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8563 cmd->supported |= (SUPPORTED_1000baseT_Half |
8564 SUPPORTED_1000baseT_Full);
/* Copper PHYs get the 10/100 modes and a TP port; serdes is fibre. */
8566 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8567 cmd->supported |= (SUPPORTED_100baseT_Half |
8568 SUPPORTED_100baseT_Full |
8569 SUPPORTED_10baseT_Half |
8570 SUPPORTED_10baseT_Full |
8572 cmd->port = PORT_TP;
8574 cmd->supported |= SUPPORTED_FIBRE;
8575 cmd->port = PORT_FIBRE;
8578 cmd->advertising = tp->link_config.advertising;
8579 if (netif_running(dev)) {
8580 cmd->speed = tp->link_config.active_speed;
8581 cmd->duplex = tp->link_config.active_duplex;
8583 cmd->phy_address = PHY_ADDR;
8584 cmd->transceiver = 0;
8585 cmd->autoneg = tp->link_config.autoneg;
/* tg3_set_settings() - ethtool set_settings handler.
 *
 * Validates the requested autoneg/speed/duplex against the hardware
 * (serdes parts only advertise 1000 Mb modes and cannot be forced to
 * other speeds; copper cannot *force* 1000; 10/100-only parts reject
 * 1000 altogether), stores the result in link_config (plus the orig_*
 * copies used across power transitions), and kicks the PHY if the
 * interface is up.
 */
8591 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8593 struct tg3 *tp = netdev_priv(dev);
8595 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8596 /* These are the only valid advertisement bits allowed. */
8597 if (cmd->autoneg == AUTONEG_ENABLE &&
8598 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8599 ADVERTISED_1000baseT_Full |
8600 ADVERTISED_Autoneg |
8603 /* Fiber can only do SPEED_1000. */
8604 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8605 (cmd->speed != SPEED_1000))
8607 /* Copper cannot force SPEED_1000. */
8608 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8609 (cmd->speed == SPEED_1000))
8611 else if ((cmd->speed == SPEED_1000) &&
8612 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8615 tg3_full_lock(tp, 0);
8617 tp->link_config.autoneg = cmd->autoneg;
8618 if (cmd->autoneg == AUTONEG_ENABLE) {
8619 tp->link_config.advertising = (cmd->advertising |
8620 ADVERTISED_Autoneg);
8621 tp->link_config.speed = SPEED_INVALID;
8622 tp->link_config.duplex = DUPLEX_INVALID;
8624 tp->link_config.advertising = 0;
8625 tp->link_config.speed = cmd->speed;
8626 tp->link_config.duplex = cmd->duplex;
8629 tp->link_config.orig_speed = tp->link_config.speed;
8630 tp->link_config.orig_duplex = tp->link_config.duplex;
8631 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8633 if (netif_running(dev))
8634 tg3_setup_phy(tp, 1);
8636 tg3_full_unlock(tp);
8641 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8643 struct tg3 *tp = netdev_priv(dev);
8645 strcpy(info->driver, DRV_MODULE_NAME);
8646 strcpy(info->version, DRV_MODULE_VERSION);
8647 strcpy(info->fw_version, tp->fw_ver);
8648 strcpy(info->bus_info, pci_name(tp->pdev));
/* tg3_get_wol() - ethtool get_wol handler.
 * Reports WAKE_MAGIC as supported iff the NVRAM said the design is
 * WoL-capable, and as active iff it is currently enabled.  No
 * SecureOn password support.
 */
8651 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8653 struct tg3 *tp = netdev_priv(dev);
8655 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8656 wol->supported = WAKE_MAGIC;
8660 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8661 wol->wolopts = WAKE_MAGIC;
8662 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* tg3_set_wol() - ethtool set_wol handler.
 * Only WAKE_MAGIC is accepted, and only on WoL-capable hardware; the
 * choice is latched into tg3_flags under tp->lock and applied at the
 * next power-state transition.
 */
8665 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8667 struct tg3 *tp = netdev_priv(dev);
8669 if (wol->wolopts & ~WAKE_MAGIC)
8671 if ((wol->wolopts & WAKE_MAGIC) &&
8672 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8675 spin_lock_bh(&tp->lock);
8676 if (wol->wolopts & WAKE_MAGIC)
8677 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8679 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8680 spin_unlock_bh(&tp->lock);
8685 static u32 tg3_get_msglevel(struct net_device *dev)
8687 struct tg3 *tp = netdev_priv(dev);
8688 return tp->msg_enable;
8691 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8693 struct tg3 *tp = netdev_priv(dev);
8694 tp->msg_enable = value;
/* tg3_set_tso() - ethtool set_tso handler.
 * Rejects TSO on chips without the capability; on HW-TSO-2 parts
 * (except 5906) also toggles TSO6, plus ECN-aware TSO on 5761, then
 * defers the TSO flag itself to the generic helper.
 */
8697 static int tg3_set_tso(struct net_device *dev, u32 value)
8699 struct tg3 *tp = netdev_priv(dev);
8701 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8706 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8707 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8709 dev->features |= NETIF_F_TSO6;
8710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8711 dev->features |= NETIF_F_TSO_ECN;
8713 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8715 return ethtool_op_set_tso(dev, value);
/* tg3_nway_reset() - ethtool nway_reset handler: restart autoneg.
 * Only meaningful on a running, non-serdes interface.  BMCR is read
 * twice - the first read clears latched bits so the second reflects
 * current state - and autoneg is restarted only if it is enabled (or
 * the driver is in parallel-detect mode).
 */
8718 static int tg3_nway_reset(struct net_device *dev)
8720 struct tg3 *tp = netdev_priv(dev);
8724 if (!netif_running(dev))
8727 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8730 spin_lock_bh(&tp->lock);
8732 tg3_readphy(tp, MII_BMCR, &bmcr);
8733 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8734 ((bmcr & BMCR_ANENABLE) ||
8735 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8736 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8740 spin_unlock_bh(&tp->lock);
8745 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8747 struct tg3 *tp = netdev_priv(dev);
8749 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8750 ering->rx_mini_max_pending = 0;
8751 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8752 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8754 ering->rx_jumbo_max_pending = 0;
8756 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8758 ering->rx_pending = tp->rx_pending;
8759 ering->rx_mini_pending = 0;
8760 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8761 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8763 ering->rx_jumbo_pending = 0;
8765 ering->tx_pending = tp->tx_pending;
/* tg3_set_ringparam() - ethtool set_ringparam handler.
 *
 * Validates the requested sizes against the ring maxima (the TX ring
 * must leave room for a maximally-fragmented skb, three of them on
 * TSO_BUG chips), stores them, and - if the interface is up - halts
 * and restarts the hardware with the new ring sizes.
 */
8768 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8770 struct tg3 *tp = netdev_priv(dev);
8771 int irq_sync = 0, err = 0;
8773 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8774 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8775 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8776 (ering->tx_pending <= MAX_SKB_FRAGS) ||
8777 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8778 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8781 if (netif_running(dev)) {
8786 tg3_full_lock(tp, irq_sync);
8788 tp->rx_pending = ering->rx_pending;
/* Some chips can only post 64 standard RX BDs. */
8790 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8791 tp->rx_pending > 63)
8792 tp->rx_pending = 63;
8793 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8794 tp->tx_pending = ering->tx_pending;
8796 if (netif_running(dev)) {
8797 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8798 err = tg3_restart_hw(tp, 1);
8800 tg3_netif_start(tp);
8803 tg3_full_unlock(tp);
8808 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8810 struct tg3 *tp = netdev_priv(dev);
8812 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8814 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8815 epause->rx_pause = 1;
8817 epause->rx_pause = 0;
8819 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8820 epause->tx_pause = 1;
8822 epause->tx_pause = 0;
/* tg3_set_pauseparam() - ethtool set_pauseparam handler.
 * Latches the requested pause autoneg and RX/TX flow-control wishes
 * into tg3_flags / link_config.flowctrl, then halts and restarts the
 * hardware if the interface is running so they take effect.
 */
8825 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8827 struct tg3 *tp = netdev_priv(dev);
8828 int irq_sync = 0, err = 0;
8830 if (netif_running(dev)) {
8835 tg3_full_lock(tp, irq_sync);
8837 if (epause->autoneg)
8838 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8840 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8841 if (epause->rx_pause)
8842 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8844 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8845 if (epause->tx_pause)
8846 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8848 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8850 if (netif_running(dev)) {
8851 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8852 err = tg3_restart_hw(tp, 1);
8854 tg3_netif_start(tp);
8857 tg3_full_unlock(tp);
8862 static u32 tg3_get_rx_csum(struct net_device *dev)
8864 struct tg3 *tp = netdev_priv(dev);
8865 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* tg3_set_rx_csum() - ethtool set_rx_csum handler.
 * Toggles the RX_CHECKSUMS flag under tp->lock; chips with broken
 * checksum hardware cannot have offload enabled.
 */
8868 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8870 struct tg3 *tp = netdev_priv(dev);
8872 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8878 spin_lock_bh(&tp->lock);
8880 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8882 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8883 spin_unlock_bh(&tp->lock);
/* tg3_set_tx_csum() - ethtool set_tx_csum handler.
 * Refused on broken-checksum chips; 5755/5787/5784/5761 support IPv6
 * checksum offload too, everything else gets IPv4-only offload.
 */
8888 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8890 struct tg3 *tp = netdev_priv(dev);
8892 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8899 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8900 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8901 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8902 ethtool_op_set_tx_ipv6_csum(dev, data);
8904 ethtool_op_set_tx_csum(dev, data);
/* tg3_get_sset_count() - ethtool get_sset_count handler: number of
 * self-test or statistics strings (the switch-on-sset lines are
 * elided in this extraction; presumably ETH_SS_TEST / ETH_SS_STATS -
 * confirm against the full source).
 */
8909 static int tg3_get_sset_count (struct net_device *dev, int sset)
8913 return TG3_NUM_TEST;
8915 return TG3_NUM_STATS;
8921 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8923 switch (stringset) {
8925 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
8928 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
8931 WARN_ON(1); /* we need a WARN() */
/* tg3_phys_id() - ethtool phys_id handler: blink the port LEDs for
 * identification.  Alternates all-LEDs-on overrides with link-LED-only
 * overrides every 500ms for 'data' seconds (effectively forever when
 * data is 0), then restores the saved LED control value.  Aborts
 * early if the sleep is interrupted by a signal.
 */
8936 static int tg3_phys_id(struct net_device *dev, u32 data)
8938 struct tg3 *tp = netdev_priv(dev);
8941 if (!netif_running(tp->dev))
8945 data = UINT_MAX / 2;
8947 for (i = 0; i < (data * 2); i++) {
8949 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8950 LED_CTRL_1000MBPS_ON |
8951 LED_CTRL_100MBPS_ON |
8952 LED_CTRL_10MBPS_ON |
8953 LED_CTRL_TRAFFIC_OVERRIDE |
8954 LED_CTRL_TRAFFIC_BLINK |
8955 LED_CTRL_TRAFFIC_LED);
8958 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8959 LED_CTRL_TRAFFIC_OVERRIDE);
8961 if (msleep_interruptible(500))
8964 tw32(MAC_LED_CTRL, tp->led_ctrl);
8968 static void tg3_get_ethtool_stats (struct net_device *dev,
8969 struct ethtool_stats *estats, u64 *tmp_stats)
8971 struct tg3 *tp = netdev_priv(dev);
8972 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8975 #define NVRAM_TEST_SIZE 0x100
8976 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8977 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8978 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
8979 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8980 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* tg3_test_nvram() - ethtool self-test: verify NVRAM integrity.
 *
 * Determines the image type from the magic word (standard image,
 * format-1 selfboot in one of three revisions, or hardware selfboot),
 * reads the appropriate number of bytes, and validates it:
 *  - format-1 selfboot: 8-bit checksum over the image (rev 2 skips
 *    the MBA word at TG3_EEPROM_SB_F1R2_MBA_OFF);
 *  - hardware selfboot: per-byte odd-parity check, with parity bits
 *    packed into bytes 0 and 8 of the image;
 *  - standard image: CRC of the bootstrap area (0x00-0x0f, checksum
 *    at 0x10) and of the manufacturing block (0x74-0xfb, checksum at
 *    0xfc).
 */
8982 static int tg3_test_nvram(struct tg3 *tp)
8986 int i, j, k, err = 0, size;
8988 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8991 if (magic == TG3_EEPROM_MAGIC)
8992 size = NVRAM_TEST_SIZE;
8993 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8994 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8995 TG3_EEPROM_SB_FORMAT_1) {
8996 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8997 case TG3_EEPROM_SB_REVISION_0:
8998 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9000 case TG3_EEPROM_SB_REVISION_2:
9001 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9003 case TG3_EEPROM_SB_REVISION_3:
9004 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9011 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9012 size = NVRAM_SELFBOOT_HW_SIZE;
9016 buf = kmalloc(size, GFP_KERNEL);
9021 for (i = 0, j = 0; i < size; i += 4, j++) {
9022 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9028 /* Selfboot format */
9029 magic = swab32(le32_to_cpu(buf[0]));
9030 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9031 TG3_EEPROM_MAGIC_FW) {
9032 u8 *buf8 = (u8 *) buf, csum8 = 0;
9034 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9035 TG3_EEPROM_SB_REVISION_2) {
9036 /* For rev 2, the csum doesn't include the MBA. */
9037 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9039 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9042 for (i = 0; i < size; i++)
9055 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9056 TG3_EEPROM_MAGIC_HW) {
9057 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9058 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9059 u8 *buf8 = (u8 *) buf;
9061 /* Separate the parity bits and the data bytes. */
9062 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9063 if ((i == 0) || (i == 8)) {
9067 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9068 parity[k++] = buf8[i] & msk;
9075 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9076 parity[k++] = buf8[i] & msk;
9079 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9080 parity[k++] = buf8[i] & msk;
9083 data[j++] = buf8[i];
/* Every data byte must have odd parity including its parity bit. */
9087 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9088 u8 hw8 = hweight8(data[i]);
9090 if ((hw8 & 0x1) && parity[i])
9092 else if (!(hw8 & 0x1) && !parity[i])
9099 /* Bootstrap checksum at offset 0x10 */
9100 csum = calc_crc((unsigned char *) buf, 0x10);
9101 if(csum != le32_to_cpu(buf[0x10/4]))
9104 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9105 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9106 if (csum != le32_to_cpu(buf[0xfc/4]))
/* Seconds tg3_test_link() waits for carrier before declaring failure;
 * SerDes links are expected to come up faster than copper.
 */
9116 #define TG3_SERDES_TIMEOUT_SEC 2
9117 #define TG3_COPPER_TIMEOUT_SEC 6
/*
 * tg3_test_link() - ethtool self-test: poll for link-up.
 * Waits up to TG3_SERDES_TIMEOUT_SEC or TG3_COPPER_TIMEOUT_SEC seconds
 * (1 s per iteration) for netif_carrier_ok(); interruptible by signal.
 * NOTE(review): lossy extract — declarations of 'i'/'max' and the
 * success/failure return statements are missing here.
 */
9119 static int tg3_test_link(struct tg3 *tp)
9123 if (!netif_running(tp->dev))
9126 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9127 max = TG3_SERDES_TIMEOUT_SEC;
9129 max = TG3_COPPER_TIMEOUT_SEC;
9131 for (i = 0; i < max; i++) {
9132 if (netif_carrier_ok(tp->dev))
9135 if (msleep_interruptible(1000))
9142 /* Only test the commonly used registers */
/*
 * tg3_test_registers() - ethtool self-test: exercise a table of MAC,
 * receive, host-coalescing, buffer-manager and mailbox registers.
 * For each entry (filtered by chip-family flags) it saves the register,
 * writes all-zeros and then read_mask|write_mask, verifying after each
 * write that read-only bits are unchanged and read/write bits took the
 * written value, then restores the saved value. The table's read_mask
 * marks bits expected to read back unchanged; write_mask marks bits
 * that must be writable. Terminated by the offset==0xffff sentinel.
 * NOTE(review): lossy extract — the reg_tbl struct declaration line,
 * 'continue'/'goto out' statements, the zero-write (tw32(offset, 0))
 * and several braces are missing from this listing.
 */
9143 static int tg3_test_registers(struct tg3 *tp)
9145 int i, is_5705, is_5750;
9146 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry applicability flags. */
9150 #define TG3_FL_5705 0x1
9151 #define TG3_FL_NOT_5705 0x2
9152 #define TG3_FL_NOT_5788 0x4
9153 #define TG3_FL_NOT_5750 0x8
9157 /* MAC Control Registers */
9158 { MAC_MODE, TG3_FL_NOT_5705,
9159 0x00000000, 0x00ef6f8c },
9160 { MAC_MODE, TG3_FL_5705,
9161 0x00000000, 0x01ef6b8c },
9162 { MAC_STATUS, TG3_FL_NOT_5705,
9163 0x03800107, 0x00000000 },
9164 { MAC_STATUS, TG3_FL_5705,
9165 0x03800100, 0x00000000 },
9166 { MAC_ADDR_0_HIGH, 0x0000,
9167 0x00000000, 0x0000ffff },
9168 { MAC_ADDR_0_LOW, 0x0000,
9169 0x00000000, 0xffffffff },
9170 { MAC_RX_MTU_SIZE, 0x0000,
9171 0x00000000, 0x0000ffff },
9172 { MAC_TX_MODE, 0x0000,
9173 0x00000000, 0x00000070 },
9174 { MAC_TX_LENGTHS, 0x0000,
9175 0x00000000, 0x00003fff },
9176 { MAC_RX_MODE, TG3_FL_NOT_5705,
9177 0x00000000, 0x000007fc },
9178 { MAC_RX_MODE, TG3_FL_5705,
9179 0x00000000, 0x000007dc },
9180 { MAC_HASH_REG_0, 0x0000,
9181 0x00000000, 0xffffffff },
9182 { MAC_HASH_REG_1, 0x0000,
9183 0x00000000, 0xffffffff },
9184 { MAC_HASH_REG_2, 0x0000,
9185 0x00000000, 0xffffffff },
9186 { MAC_HASH_REG_3, 0x0000,
9187 0x00000000, 0xffffffff },
9189 /* Receive Data and Receive BD Initiator Control Registers. */
9190 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9191 0x00000000, 0xffffffff },
9192 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9193 0x00000000, 0xffffffff },
9194 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9195 0x00000000, 0x00000003 },
9196 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9197 0x00000000, 0xffffffff },
9198 { RCVDBDI_STD_BD+0, 0x0000,
9199 0x00000000, 0xffffffff },
9200 { RCVDBDI_STD_BD+4, 0x0000,
9201 0x00000000, 0xffffffff },
9202 { RCVDBDI_STD_BD+8, 0x0000,
9203 0x00000000, 0xffff0002 },
9204 { RCVDBDI_STD_BD+0xc, 0x0000,
9205 0x00000000, 0xffffffff },
9207 /* Receive BD Initiator Control Registers. */
9208 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9209 0x00000000, 0xffffffff },
9210 { RCVBDI_STD_THRESH, TG3_FL_5705,
9211 0x00000000, 0x000003ff },
9212 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9213 0x00000000, 0xffffffff },
9215 /* Host Coalescing Control Registers. */
9216 { HOSTCC_MODE, TG3_FL_NOT_5705,
9217 0x00000000, 0x00000004 },
9218 { HOSTCC_MODE, TG3_FL_5705,
9219 0x00000000, 0x000000f6 },
9220 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9221 0x00000000, 0xffffffff },
9222 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9223 0x00000000, 0x000003ff },
9224 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9225 0x00000000, 0xffffffff },
9226 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9227 0x00000000, 0x000003ff },
9228 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9229 0x00000000, 0xffffffff },
9230 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9231 0x00000000, 0x000000ff },
9232 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9233 0x00000000, 0xffffffff },
9234 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9235 0x00000000, 0x000000ff },
9236 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9237 0x00000000, 0xffffffff },
9238 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9239 0x00000000, 0xffffffff },
9240 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9241 0x00000000, 0xffffffff },
9242 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9243 0x00000000, 0x000000ff },
9244 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9245 0x00000000, 0xffffffff },
9246 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9247 0x00000000, 0x000000ff },
9248 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9249 0x00000000, 0xffffffff },
9250 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9251 0x00000000, 0xffffffff },
9252 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9253 0x00000000, 0xffffffff },
9254 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9255 0x00000000, 0xffffffff },
9256 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9257 0x00000000, 0xffffffff },
9258 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9259 0xffffffff, 0x00000000 },
9260 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9261 0xffffffff, 0x00000000 },
9263 /* Buffer Manager Control Registers. */
9264 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9265 0x00000000, 0x007fff80 },
9266 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9267 0x00000000, 0x007fffff },
9268 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9269 0x00000000, 0x0000003f },
9270 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9271 0x00000000, 0x000001ff },
9272 { BUFMGR_MB_HIGH_WATER, 0x0000,
9273 0x00000000, 0x000001ff },
9274 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9275 0xffffffff, 0x00000000 },
9276 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9277 0xffffffff, 0x00000000 },
9279 /* Mailbox Registers */
9280 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9281 0x00000000, 0x000001ff },
9282 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9283 0x00000000, 0x000001ff },
9284 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9285 0x00000000, 0x000007ff },
9286 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9287 0x00000000, 0x000001ff },
/* Sentinel terminating the table walk below. */
9289 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9292 is_5705 = is_5750 = 0;
9293 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9295 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
/* Walk the table, skipping entries not applicable to this chip. */
9299 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9300 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9303 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9306 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9307 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9310 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9313 offset = (u32) reg_tbl[i].offset;
9314 read_mask = reg_tbl[i].read_mask;
9315 write_mask = reg_tbl[i].write_mask;
9317 /* Save the original register content */
9318 save_val = tr32(offset);
9320 /* Determine the read-only value. */
9321 read_val = save_val & read_mask;
9323 /* Write zero to the register, then make sure the read-only bits
9324 * are not changed and the read/write bits are all zeros.
9330 /* Test the read-only and read/write bits. */
9331 if (((val & read_mask) != read_val) || (val & write_mask))
9334 /* Write ones to all the bits defined by RdMask and WrMask, then
9335 * make sure the read-only bits are not changed and the
9336 * read/write bits are all ones.
9338 tw32(offset, read_mask | write_mask);
9342 /* Test the read-only bits. */
9343 if ((val & read_mask) != read_val)
9346 /* Test the read/write bits. */
9347 if ((val & write_mask) != write_mask)
9350 tw32(offset, save_val);
/* Failure path: log (if enabled), restore the register, return error. */
9356 if (netif_msg_hw(tp))
9357 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9359 tw32(offset, save_val);
/*
 * tg3_do_mem_test() - write/read-back three test patterns (zeros, ones,
 * 0xaa55a55a) over @len bytes of NIC-internal memory starting at
 * @offset, 32 bits at a time, via tg3_write_mem()/tg3_read_mem().
 * Returns 0 on success; presumably -EIO on mismatch (the error return
 * and final return 0 are missing from this lossy extract — confirm).
 */
9363 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9365 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9369 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9370 for (j = 0; j < len; j += 4) {
9373 tg3_write_mem(tp, offset + j, test_pattern[i]);
9374 tg3_read_mem(tp, offset + j, &val);
9375 if (val != test_pattern[i])
/*
 * tg3_test_memory() - ethtool self-test: pattern-test the NIC's
 * internal memory regions. Selects a chip-specific {offset, len}
 * table (570x, 5705, 5755-family, or 5906), each terminated by an
 * offset of 0xffffffff, and runs tg3_do_mem_test() over every entry.
 * Returns 0 on success or the first tg3_do_mem_test() error.
 */
9382 static int tg3_test_memory(struct tg3 *tp)
9384 static struct mem_entry {
9387 } mem_tbl_570x[] = {
9388 { 0x00000000, 0x00b50},
9389 { 0x00002000, 0x1c000},
9390 { 0xffffffff, 0x00000}
9391 }, mem_tbl_5705[] = {
9392 { 0x00000100, 0x0000c},
9393 { 0x00000200, 0x00008},
9394 { 0x00004000, 0x00800},
9395 { 0x00006000, 0x01000},
9396 { 0x00008000, 0x02000},
9397 { 0x00010000, 0x0e000},
9398 { 0xffffffff, 0x00000}
9399 }, mem_tbl_5755[] = {
9400 { 0x00000200, 0x00008},
9401 { 0x00004000, 0x00800},
9402 { 0x00006000, 0x00800},
9403 { 0x00008000, 0x02000},
9404 { 0x00010000, 0x0c000},
9405 { 0xffffffff, 0x00000}
9406 }, mem_tbl_5906[] = {
9407 { 0x00000200, 0x00008},
9408 { 0x00004000, 0x00400},
9409 { 0x00006000, 0x00400},
9410 { 0x00008000, 0x01000},
9411 { 0x00010000, 0x01000},
9412 { 0xffffffff, 0x00000}
9414 struct mem_entry *mem_tbl;
/* Pick the region table matching the ASIC family. */
9418 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9422 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9423 mem_tbl = mem_tbl_5755;
9424 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9425 mem_tbl = mem_tbl_5906;
9427 mem_tbl = mem_tbl_5705;
9429 mem_tbl = mem_tbl_570x;
9431 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9432 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9433 mem_tbl[i].len)) != 0)
/* Loopback mode selectors for tg3_run_loopback(): loop frames inside
 * the MAC, or out through the PHY.
 */
9440 #define TG3_MAC_LOOPBACK 0
9441 #define TG3_PHY_LOOPBACK 1
/*
 * tg3_run_loopback() - ethtool self-test: send one frame through MAC
 * or PHY loopback and verify it comes back intact.
 * Configures the MAC (and, for PHY loopback, BMCR / EPHY test
 * registers) for the chosen mode, builds a frame (our MAC address,
 * zero padding, then a ramp of byte values), DMA-maps it, posts it on
 * the TX ring and polls the status block for the TX-consumed / RX-
 * produced indices to advance. It then validates the RX descriptor
 * (ring, error bits, length) and compares the received payload byte
 * by byte against the transmitted ramp.
 * Returns 0 on success, nonzero on any mismatch/timeout.
 * NOTE(review): lossy extract — 'tx_len' initialization, 'err = -EIO'
 * assignments, several 'goto out' paths and udelay()s are missing.
 */
9443 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9445 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9447 struct sk_buff *skb, *rx_skb;
9450 int num_pkts, tx_len, rx_len, i, err;
9451 struct tg3_rx_buffer_desc *desc;
9453 if (loopback_mode == TG3_MAC_LOOPBACK) {
9454 /* HW errata - mac loopback fails in some cases on 5780.
9455 * Normal traffic and PHY loopback are not affected by
9458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9461 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9462 MAC_MODE_PORT_INT_LPBACK;
9463 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9464 mac_mode |= MAC_MODE_LINK_POLARITY;
9465 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9466 mac_mode |= MAC_MODE_PORT_MODE_MII;
9468 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9469 tw32(MAC_MODE, mac_mode);
9470 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
/* 5906: open the EPHY shadow register window to tweak reg 0x1b. */
9473 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9476 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9479 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9480 phytest | MII_TG3_EPHY_SHADOW_EN);
9481 if (!tg3_readphy(tp, 0x1b, &phy))
9482 tg3_writephy(tp, 0x1b, phy & ~0x20);
9483 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9485 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9487 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9489 tg3_phy_toggle_automdix(tp, 0);
9491 tg3_writephy(tp, MII_BMCR, val);
9494 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9496 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9497 mac_mode |= MAC_MODE_PORT_MODE_MII;
9499 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9501 /* reset to prevent losing 1st rx packet intermittently */
9502 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9503 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9505 tw32_f(MAC_RX_MODE, tp->rx_mode);
9507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9508 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9509 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9510 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9511 mac_mode |= MAC_MODE_LINK_POLARITY;
9512 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9513 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9515 tw32(MAC_MODE, mac_mode);
/* Build the test frame: dest = our own MAC, 8 zero bytes, byte ramp. */
9523 skb = netdev_alloc_skb(tp->dev, tx_len);
9527 tx_data = skb_put(skb, tx_len);
9528 memcpy(tx_data, tp->dev->dev_addr, 6);
9529 memset(tx_data + 6, 0x0, 8);
9531 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9533 for (i = 14; i < tx_len; i++)
9534 tx_data[i] = (u8) (i & 0xff);
9536 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9538 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* Remember where the RX producer was before we send. */
9543 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9547 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9552 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9554 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9558 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9559 for (i = 0; i < 25; i++) {
9560 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9565 tx_idx = tp->hw_status->idx[0].tx_consumer;
9566 rx_idx = tp->hw_status->idx[0].rx_producer;
9567 if ((tx_idx == tp->tx_prod) &&
9568 (rx_idx == (rx_start_idx + num_pkts)))
9572 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9575 if (tx_idx != tp->tx_prod)
9578 if (rx_idx != rx_start_idx + num_pkts)
/* Validate the returned RX descriptor and payload. */
9581 desc = &tp->rx_rcb[rx_start_idx];
9582 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9583 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9584 if (opaque_key != RXD_OPAQUE_RING_STD)
9587 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9588 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9591 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9592 if (rx_len != tx_len)
9595 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9597 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9598 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9600 for (i = 14; i < tx_len; i++) {
9601 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9606 /* tg3_free_rings will unmap and free the rx_skb */
/* Bit flags returned by tg3_test_loopback() identifying which
 * loopback variant(s) failed.
 */
9611 #define TG3_MAC_LOOPBACK_FAILED 1
9612 #define TG3_PHY_LOOPBACK_FAILED 2
9613 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9614 TG3_PHY_LOOPBACK_FAILED)
/*
 * tg3_test_loopback() - run MAC loopback and (for copper, non-phylib
 * devices) PHY loopback, returning a TG3_*_LOOPBACK_FAILED bitmask
 * (0 on full success). Resets the hardware first. On 5784/5761 it
 * must take the CPMU mutex and disable link-based power management
 * around the test, restoring CPMU_CTRL and releasing the mutex after.
 * NOTE(review): lossy extract — 'err = 0' init, udelay in the mutex
 * poll loop, and the final return are missing from this listing.
 */
9616 static int tg3_test_loopback(struct tg3 *tp)
9621 if (!netif_running(tp->dev))
9622 return TG3_LOOPBACK_FAILED;
9624 err = tg3_reset_hw(tp, 1);
9626 return TG3_LOOPBACK_FAILED;
9628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9633 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9635 /* Wait for up to 40 microseconds to acquire lock. */
9636 for (i = 0; i < 4; i++) {
9637 status = tr32(TG3_CPMU_MUTEX_GNT);
9638 if (status == CPMU_MUTEX_GNT_DRIVER)
9643 if (status != CPMU_MUTEX_GNT_DRIVER)
9644 return TG3_LOOPBACK_FAILED;
9646 /* Turn off link-based power management. */
9647 cpmuctrl = tr32(TG3_CPMU_CTRL);
9649 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9650 CPMU_CTRL_LINK_AWARE_MODE));
9653 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9654 err |= TG3_MAC_LOOPBACK_FAILED;
9656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9657 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9658 tw32(TG3_CPMU_CTRL, cpmuctrl);
9660 /* Release the mutex */
9661 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
/* PHY loopback only makes sense when we actually drive the PHY. */
9664 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9665 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9666 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9667 err |= TG3_PHY_LOOPBACK_FAILED;
/*
 * tg3_self_test() - top-level ethtool self-test dispatcher.
 * Wakes the device from low power if needed, zeroes the result array,
 * then runs the NVRAM and link tests unconditionally. If the caller
 * requested offline testing, it additionally halts the device (with
 * full lock / irq sync), runs the register, memory, loopback and
 * interrupt tests, then restarts the hardware and netif queues.
 * Failures set ETH_TEST_FL_FAILED and per-test entries in data[].
 * Finally re-enters low-power state if that is where we started.
 * NOTE(review): lossy extract — data[n] = 1 failure markers, the
 * netif_stop/irq_sync sequence and several braces are missing here.
 */
9673 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9676 struct tg3 *tp = netdev_priv(dev);
9678 if (tp->link_config.phy_is_low_power)
9679 tg3_set_power_state(tp, PCI_D0);
9681 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9683 if (tg3_test_nvram(tp) != 0) {
9684 etest->flags |= ETH_TEST_FL_FAILED;
9687 if (tg3_test_link(tp) != 0) {
9688 etest->flags |= ETH_TEST_FL_FAILED;
9691 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9692 int err, irq_sync = 0;
9694 if (netif_running(dev)) {
9699 tg3_full_lock(tp, irq_sync);
/* Quiesce the chip and its CPUs before poking registers/memory. */
9701 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9702 err = tg3_nvram_lock(tp);
9703 tg3_halt_cpu(tp, RX_CPU_BASE);
9704 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9705 tg3_halt_cpu(tp, TX_CPU_BASE);
9707 tg3_nvram_unlock(tp);
9709 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9712 if (tg3_test_registers(tp) != 0) {
9713 etest->flags |= ETH_TEST_FL_FAILED;
9716 if (tg3_test_memory(tp) != 0) {
9717 etest->flags |= ETH_TEST_FL_FAILED;
9720 if ((data[4] = tg3_test_loopback(tp)) != 0)
9721 etest->flags |= ETH_TEST_FL_FAILED;
/* Interrupt test needs the lock dropped. */
9723 tg3_full_unlock(tp);
9725 if (tg3_test_interrupt(tp) != 0) {
9726 etest->flags |= ETH_TEST_FL_FAILED;
9730 tg3_full_lock(tp, 0);
9732 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9733 if (netif_running(dev)) {
9734 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9735 if (!tg3_restart_hw(tp, 1))
9736 tg3_netif_start(tp);
9739 tg3_full_unlock(tp);
9741 if (tp->link_config.phy_is_low_power)
9742 tg3_set_power_state(tp, PCI_D3hot);
/*
 * tg3_ioctl() - net_device ioctl handler for the MII ioctls:
 * report the PHY address, and read/write a PHY register under
 * tp->lock (writes require CAP_NET_ADMIN). SerDes devices have no
 * MDIO-accessible PHY, so those requests fall through. Requests are
 * refused while the PHY is in low-power state.
 * NOTE(review): the switch(cmd) line, case labels (presumably
 * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG — confirm) and return
 * statements are missing from this lossy extract.
 */
9746 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9748 struct mii_ioctl_data *data = if_mii(ifr);
9749 struct tg3 *tp = netdev_priv(dev);
9754 data->phy_id = PHY_ADDR;
9760 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9761 break; /* We have no PHY */
9763 if (tp->link_config.phy_is_low_power)
9766 spin_lock_bh(&tp->lock);
9767 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9768 spin_unlock_bh(&tp->lock);
9770 data->val_out = mii_regval;
9776 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9777 break; /* We have no PHY */
9779 if (!capable(CAP_NET_ADMIN))
9782 if (tp->link_config.phy_is_low_power)
9785 spin_lock_bh(&tp->lock);
9786 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9787 spin_unlock_bh(&tp->lock);
9798 #if TG3_VLAN_TAG_USED
/*
 * tg3_vlan_rx_register() - VLAN group registration callback (only
 * built when 8021q support is configured). Under the full lock it
 * records the group (assignment line missing in this lossy extract)
 * and refreshes RX_MODE so the chip keeps/strips VLAN tags as
 * appropriate, stopping/restarting the netif queues around the change.
 */
9799 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9801 struct tg3 *tp = netdev_priv(dev);
9803 if (netif_running(dev))
9806 tg3_full_lock(tp, 0);
9810 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9811 __tg3_set_rx_mode(dev);
9813 if (netif_running(dev))
9814 tg3_netif_start(tp);
9816 tg3_full_unlock(tp);
/*
 * tg3_get_coalesce() - ethtool callback: copy the driver's cached
 * coalescing parameters (tp->coal) into the caller's structure.
 */
9820 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9822 struct tg3 *tp = netdev_priv(dev);
9824 memcpy(ec, &tp->coal, sizeof(*ec));
/*
 * tg3_set_coalesce() - ethtool callback: validate and apply interrupt
 * coalescing parameters. The irq-tick and stats-tick limits are zero
 * on 5705+ chips (those fields are unsupported there), so nonzero
 * requests are rejected on that hardware. Also rejects settings that
 * would disable rx or tx interrupts entirely (both usecs and frames
 * zero). Only the relevant ec fields are copied into tp->coal; if the
 * device is up the new values are written to the chip under the full
 * lock. Returns 0 on success (EINVAL returns missing in this extract).
 */
9828 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9830 struct tg3 *tp = netdev_priv(dev);
9831 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9832 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9834 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9835 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9836 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9837 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9838 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9841 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9842 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9843 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9844 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9845 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9846 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9847 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9848 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9849 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9850 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9853 /* No rx interrupts will be generated if both are zero */
9854 if ((ec->rx_coalesce_usecs == 0) &&
9855 (ec->rx_max_coalesced_frames == 0))
9858 /* No tx interrupts will be generated if both are zero */
9859 if ((ec->tx_coalesce_usecs == 0) &&
9860 (ec->tx_max_coalesced_frames == 0))
9863 /* Only copy relevant parameters, ignore all others. */
9864 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9865 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9866 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9867 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9868 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9869 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9870 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9871 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9872 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9874 if (netif_running(dev)) {
9875 tg3_full_lock(tp, 0);
9876 __tg3_set_coalesce(tp, &tp->coal);
9877 tg3_full_unlock(tp);
/* Ethtool operations table wiring the callbacks above (and others
 * defined earlier in the file) into the net core.
 */
9882 static const struct ethtool_ops tg3_ethtool_ops = {
9883 .get_settings = tg3_get_settings,
9884 .set_settings = tg3_set_settings,
9885 .get_drvinfo = tg3_get_drvinfo,
9886 .get_regs_len = tg3_get_regs_len,
9887 .get_regs = tg3_get_regs,
9888 .get_wol = tg3_get_wol,
9889 .set_wol = tg3_set_wol,
9890 .get_msglevel = tg3_get_msglevel,
9891 .set_msglevel = tg3_set_msglevel,
9892 .nway_reset = tg3_nway_reset,
9893 .get_link = ethtool_op_get_link,
9894 .get_eeprom_len = tg3_get_eeprom_len,
9895 .get_eeprom = tg3_get_eeprom,
9896 .set_eeprom = tg3_set_eeprom,
9897 .get_ringparam = tg3_get_ringparam,
9898 .set_ringparam = tg3_set_ringparam,
9899 .get_pauseparam = tg3_get_pauseparam,
9900 .set_pauseparam = tg3_set_pauseparam,
9901 .get_rx_csum = tg3_get_rx_csum,
9902 .set_rx_csum = tg3_set_rx_csum,
9903 .set_tx_csum = tg3_set_tx_csum,
9904 .set_sg = ethtool_op_set_sg,
9905 .set_tso = tg3_set_tso,
9906 .self_test = tg3_self_test,
9907 .get_strings = tg3_get_strings,
9908 .phys_id = tg3_phys_id,
9909 .get_ethtool_stats = tg3_get_ethtool_stats,
9910 .get_coalesce = tg3_get_coalesce,
9911 .set_coalesce = tg3_set_coalesce,
9912 .get_sset_count = tg3_get_sset_count,
/*
 * tg3_get_eeprom_size() - probe-time sizing of a legacy EEPROM.
 * Starts from the default EEPROM_CHIP_SIZE, bails out if the magic
 * word is unrecognized, then reads at increasing power-of-two offsets
 * until the validation signature reappears — that wrap-around point
 * is the real chip size, stored in tp->nvram_size.
 * NOTE(review): the cursize initialization, the wrap-detection
 * comparison and the offset-doubling step are missing from this
 * lossy extract.
 */
9915 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9917 u32 cursize, val, magic;
9919 tp->nvram_size = EEPROM_CHIP_SIZE;
9921 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9924 if ((magic != TG3_EEPROM_MAGIC) &&
9925 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9926 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9930 * Size the chip by reading offsets at increasing powers of two.
9931 * When we encounter our validation signature, we know the addressing
9932 * has wrapped around, and thus have our chip size.
9936 while (cursize < tp->nvram_size) {
9937 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9946 tp->nvram_size = cursize;
/*
 * tg3_get_nvram_size() - probe-time sizing of the NVRAM.
 * Selfboot images (magic mismatch) fall back to tg3_get_eeprom_size().
 * Otherwise the size in KB is read from the word at offset 0xf0
 * (upper 16 bits); if that is unavailable/zero the size defaults to
 * TG3_NVRAM_SIZE_512KB.
 */
9949 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9953 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9956 /* Selfboot format */
9957 if (val != TG3_EEPROM_MAGIC) {
9958 tg3_get_eeprom_size(tp);
9962 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9964 tp->nvram_size = (val >> 16) * 1024;
9968 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/*
 * tg3_get_nvram_info() - decode NVRAM_CFG1 on pre-5752 chips to set
 * the flash vendor (JEDEC id), page size and buffered/flash flags.
 * If the flash interface is enabled, FLASH flag is set and compat
 * bypass cleared; on 5750/5780-class chips the vendor field selects
 * per-vendor page sizes, otherwise (final branch) Atmel buffered
 * flash defaults are assumed.
 * NOTE(review): break statements between the vendor cases are missing
 * in this lossy extract.
 */
9971 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9975 nvcfg1 = tr32(NVRAM_CFG1);
9976 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9977 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9980 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9981 tw32(NVRAM_CFG1, nvcfg1);
9984 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9985 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9986 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9987 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9988 tp->nvram_jedecnum = JEDEC_ATMEL;
9989 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9990 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9992 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9993 tp->nvram_jedecnum = JEDEC_ATMEL;
9994 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9996 case FLASH_VENDOR_ATMEL_EEPROM:
9997 tp->nvram_jedecnum = JEDEC_ATMEL;
9998 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9999 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10001 case FLASH_VENDOR_ST:
10002 tp->nvram_jedecnum = JEDEC_ST;
10003 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10004 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10006 case FLASH_VENDOR_SAIFUN:
10007 tp->nvram_jedecnum = JEDEC_SAIFUN;
10008 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10010 case FLASH_VENDOR_SST_SMALL:
10011 case FLASH_VENDOR_SST_LARGE:
10012 tp->nvram_jedecnum = JEDEC_SST;
10013 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Default: assume Atmel buffered flash. */
10018 tp->nvram_jedecnum = JEDEC_ATMEL;
10019 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10020 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
/*
 * tg3_get_5752_nvram_info() - decode NVRAM_CFG1 on 5752-family chips.
 * Bit 27 marks TPM-protected NVRAM. The 5752 vendor field selects
 * jedecnum/buffered/flash flags; for flash parts the page size comes
 * from the 5752 page-size field, for EEPROMs the page size is set to
 * the full chip size and compat bypass is cleared.
 * NOTE(review): break statements and default cases are missing in
 * this lossy extract.
 */
10024 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10028 nvcfg1 = tr32(NVRAM_CFG1);
10030 /* NVRAM protection for TPM */
10031 if (nvcfg1 & (1 << 27))
10032 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10034 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10035 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10036 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10037 tp->nvram_jedecnum = JEDEC_ATMEL;
10038 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10040 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10041 tp->nvram_jedecnum = JEDEC_ATMEL;
10042 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10043 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10045 case FLASH_5752VENDOR_ST_M45PE10:
10046 case FLASH_5752VENDOR_ST_M45PE20:
10047 case FLASH_5752VENDOR_ST_M45PE40:
10048 tp->nvram_jedecnum = JEDEC_ST;
10049 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10050 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10054 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10055 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10056 case FLASH_5752PAGE_SIZE_256:
10057 tp->nvram_pagesize = 256;
10059 case FLASH_5752PAGE_SIZE_512:
10060 tp->nvram_pagesize = 512;
10062 case FLASH_5752PAGE_SIZE_1K:
10063 tp->nvram_pagesize = 1024;
10065 case FLASH_5752PAGE_SIZE_2K:
10066 tp->nvram_pagesize = 2048;
10068 case FLASH_5752PAGE_SIZE_4K:
10069 tp->nvram_pagesize = 4096;
10071 case FLASH_5752PAGE_SIZE_264:
10072 tp->nvram_pagesize = 264;
10077 /* For eeprom, set pagesize to maximum eeprom size */
10078 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10080 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10081 tw32(NVRAM_CFG1, nvcfg1);
/*
 * tg3_get_5755_nvram_info() - decode NVRAM_CFG1 on 5755-family chips.
 * Bit 27 marks TPM-protected NVRAM (the 'protect = 1' assignment is
 * missing from this lossy extract — confirm). Vendor id selects Atmel
 * (264-byte pages) or ST M45PE (256-byte pages) parts; the specific
 * device id then fixes tp->nvram_size, reduced when protection is on.
 */
10085 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10087 u32 nvcfg1, protect = 0;
10089 nvcfg1 = tr32(NVRAM_CFG1);
10091 /* NVRAM protection for TPM */
10092 if (nvcfg1 & (1 << 27)) {
10093 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10097 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10099 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10100 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10101 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10102 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10103 tp->nvram_jedecnum = JEDEC_ATMEL;
10104 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10105 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10106 tp->nvram_pagesize = 264;
10107 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10108 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10109 tp->nvram_size = (protect ? 0x3e200 :
10110 TG3_NVRAM_SIZE_512KB);
10111 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10112 tp->nvram_size = (protect ? 0x1f200 :
10113 TG3_NVRAM_SIZE_256KB);
10115 tp->nvram_size = (protect ? 0x1f200 :
10116 TG3_NVRAM_SIZE_128KB);
10118 case FLASH_5752VENDOR_ST_M45PE10:
10119 case FLASH_5752VENDOR_ST_M45PE20:
10120 case FLASH_5752VENDOR_ST_M45PE40:
10121 tp->nvram_jedecnum = JEDEC_ST;
10122 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10123 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10124 tp->nvram_pagesize = 256;
10125 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10126 tp->nvram_size = (protect ?
10127 TG3_NVRAM_SIZE_64KB :
10128 TG3_NVRAM_SIZE_128KB);
10129 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10130 tp->nvram_size = (protect ?
10131 TG3_NVRAM_SIZE_64KB :
10132 TG3_NVRAM_SIZE_256KB);
10134 tp->nvram_size = (protect ?
10135 TG3_NVRAM_SIZE_128KB :
10136 TG3_NVRAM_SIZE_512KB);
/*
 * tg3_get_5787_nvram_info() - decode NVRAM_CFG1 on 5787/5784-family
 * chips: EEPROM variants (Atmel jedecnum, full-chip page size, compat
 * bypass cleared), Atmel flash (264-byte pages) or ST M45PE flash
 * (256-byte pages). NOTE(review): break statements are missing in
 * this lossy extract.
 */
10141 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10145 nvcfg1 = tr32(NVRAM_CFG1);
10147 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10148 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10149 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10150 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10151 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10152 tp->nvram_jedecnum = JEDEC_ATMEL;
10153 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10154 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10156 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10157 tw32(NVRAM_CFG1, nvcfg1);
10159 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10160 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10161 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10162 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10163 tp->nvram_jedecnum = JEDEC_ATMEL;
10164 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10165 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10166 tp->nvram_pagesize = 264;
10168 case FLASH_5752VENDOR_ST_M45PE10:
10169 case FLASH_5752VENDOR_ST_M45PE20:
10170 case FLASH_5752VENDOR_ST_M45PE40:
10171 tp->nvram_jedecnum = JEDEC_ST;
10172 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10173 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10174 tp->nvram_pagesize = 256;
/*
 * tg3_get_5761_nvram_info() - decode NVRAM_CFG1 on 5761-family chips.
 * Bit 27 marks TPM-protected NVRAM. All supported parts use 256-byte
 * pages; Atmel variants additionally set NO_NVRAM_ADDR_TRANS. If
 * protected, the size comes from the NVRAM_ADDR_LOCKOUT register
 * (the surrounding if/else and size arithmetic are partly missing in
 * this lossy extract); otherwise the device id maps to 256 KB-2 MB.
 */
10179 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10181 u32 nvcfg1, protect = 0;
10183 nvcfg1 = tr32(NVRAM_CFG1);
10185 /* NVRAM protection for TPM */
10186 if (nvcfg1 & (1 << 27)) {
10187 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10191 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10193 case FLASH_5761VENDOR_ATMEL_ADB021D:
10194 case FLASH_5761VENDOR_ATMEL_ADB041D:
10195 case FLASH_5761VENDOR_ATMEL_ADB081D:
10196 case FLASH_5761VENDOR_ATMEL_ADB161D:
10197 case FLASH_5761VENDOR_ATMEL_MDB021D:
10198 case FLASH_5761VENDOR_ATMEL_MDB041D:
10199 case FLASH_5761VENDOR_ATMEL_MDB081D:
10200 case FLASH_5761VENDOR_ATMEL_MDB161D:
10201 tp->nvram_jedecnum = JEDEC_ATMEL;
10202 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10203 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10204 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10205 tp->nvram_pagesize = 256;
10207 case FLASH_5761VENDOR_ST_A_M45PE20:
10208 case FLASH_5761VENDOR_ST_A_M45PE40:
10209 case FLASH_5761VENDOR_ST_A_M45PE80:
10210 case FLASH_5761VENDOR_ST_A_M45PE16:
10211 case FLASH_5761VENDOR_ST_M_M45PE20:
10212 case FLASH_5761VENDOR_ST_M_M45PE40:
10213 case FLASH_5761VENDOR_ST_M_M45PE80:
10214 case FLASH_5761VENDOR_ST_M_M45PE16:
10215 tp->nvram_jedecnum = JEDEC_ST;
10216 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10217 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10218 tp->nvram_pagesize = 256;
10223 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10226 case FLASH_5761VENDOR_ATMEL_ADB161D:
10227 case FLASH_5761VENDOR_ATMEL_MDB161D:
10228 case FLASH_5761VENDOR_ST_A_M45PE16:
10229 case FLASH_5761VENDOR_ST_M_M45PE16:
10230 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10232 case FLASH_5761VENDOR_ATMEL_ADB081D:
10233 case FLASH_5761VENDOR_ATMEL_MDB081D:
10234 case FLASH_5761VENDOR_ST_A_M45PE80:
10235 case FLASH_5761VENDOR_ST_M_M45PE80:
10236 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10238 case FLASH_5761VENDOR_ATMEL_ADB041D:
10239 case FLASH_5761VENDOR_ATMEL_MDB041D:
10240 case FLASH_5761VENDOR_ST_A_M45PE40:
10241 case FLASH_5761VENDOR_ST_M_M45PE40:
10242 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10244 case FLASH_5761VENDOR_ATMEL_ADB021D:
10245 case FLASH_5761VENDOR_ATMEL_MDB021D:
10246 case FLASH_5761VENDOR_ST_A_M45PE20:
10247 case FLASH_5761VENDOR_ST_M_M45PE20:
10248 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/*
 * tg3_get_5906_nvram_info() - the 5906 always uses a buffered Atmel
 * EEPROM; no CFG1 decoding needed.
 */
10254 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10256 tp->nvram_jedecnum = JEDEC_ATMEL;
10257 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10258 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10261 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/*
 * tg3_nvram_init() - probe-time NVRAM/EEPROM initialization.
 * Resets the EEPROM state machine, enables serial-EEPROM access in
 * GRC_LOCAL_CTRL, then for NVRAM-capable chips (non-5700/5701) takes
 * the NVRAM lock, dispatches to the chip-specific CFG1 decoder,
 * sizes the NVRAM if the decoder did not, and releases access/lock.
 * 5700/5701 fall back to plain EEPROM handling (flags cleared,
 * tg3_get_eeprom_size()).
 */
10264 tw32_f(GRC_EEPROM_ADDR,
10265 (EEPROM_ADDR_FSM_RESET |
10266 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10267 EEPROM_ADDR_CLKPERD_SHIFT)));
10271 /* Enable seeprom accesses. */
10272 tw32_f(GRC_LOCAL_CTRL,
10273 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10276 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10277 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10278 tp->tg3_flags |= TG3_FLAG_NVRAM;
10280 if (tg3_nvram_lock(tp)) {
/* NOTE(review): "nvarm" is a typo for "nvram" in the log message;
 * left untouched since runtime strings must not change in a doc pass.
 */
10281 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10282 "tg3_nvram_init failed.\n", tp->dev->name);
10285 tg3_enable_nvram_access(tp);
10287 tp->nvram_size = 0;
10289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10290 tg3_get_5752_nvram_info(tp);
10291 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10292 tg3_get_5755_nvram_info(tp);
10293 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10295 tg3_get_5787_nvram_info(tp);
10296 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10297 tg3_get_5761_nvram_info(tp);
10298 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10299 tg3_get_5906_nvram_info(tp);
10301 tg3_get_nvram_info(tp);
10303 if (tp->nvram_size == 0)
10304 tg3_get_nvram_size(tp);
10306 tg3_disable_nvram_access(tp);
10307 tg3_nvram_unlock(tp);
/* 5700/5701: no NVRAM interface — treat as plain EEPROM. */
10310 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10312 tg3_get_eeprom_size(tp);
/*
 * tg3_nvram_read_using_eeprom() - read one 32-bit word via the
 * serial-EEPROM interface (the 5700/5701 path with no NVRAM block).
 *
 * Programs GRC_EEPROM_ADDR with the offset and READ|START, polls up to
 * 1000 iterations for EEPROM_ADDR_COMPLETE, then fetches the word from
 * GRC_EEPROM_DATA into *val.
 *
 * NOTE(review): the offset-validation condition, the poll delay, and
 * the error-return lines are missing from this chunk (numbering jumps
 * 10322 -> 10326, 10337 -> 10339) - verify against the full file.
 */
10316 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10317 u32 offset, u32 *val)
10322 if (offset > EEPROM_ADDR_ADDR_MASK ||
/* Preserve the clock-period bits; clear address/devid fields. */
10326 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10327 EEPROM_ADDR_DEVID_MASK |
10329 tw32(GRC_EEPROM_ADDR,
10331 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10332 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10333 EEPROM_ADDR_ADDR_MASK) |
10334 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Poll for completion of the EEPROM read transaction. */
10336 for (i = 0; i < 1000; i++) {
10337 tmp = tr32(GRC_EEPROM_ADDR);
10339 if (tmp & EEPROM_ADDR_COMPLETE)
10343 if (!(tmp & EEPROM_ADDR_COMPLETE))
10346 *val = tr32(GRC_EEPROM_DATA);
10350 #define NVRAM_CMD_TIMEOUT 10000
/*
 * tg3_nvram_exec_cmd() - issue an NVRAM command and poll for DONE.
 *
 * Writes nvram_cmd into NVRAM_CMD and polls up to NVRAM_CMD_TIMEOUT
 * iterations for NVRAM_CMD_DONE.  Presumably returns -EBUSY on timeout
 * and 0 on success - the return statements are missing from this chunk
 * (TODO confirm against the full file).
 */
10352 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10356 tw32(NVRAM_CMD, nvram_cmd);
10357 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10359 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10364 if (i == NVRAM_CMD_TIMEOUT) {
/*
 * tg3_nvram_phys_addr() - translate a linear NVRAM offset to the
 * physical (page, offset-in-page) address used by buffered Atmel
 * AT45DB0X1B-style flash parts.
 *
 * Only applies when NVRAM is present, buffered, flash, the
 * no-address-translation flag is clear, and the vendor is Atmel;
 * otherwise the address presumably passes through unchanged (the
 * return line is missing from this chunk - TODO confirm).
 */
10370 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10372 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10373 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10374 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10375 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10376 (tp->nvram_jedecnum == JEDEC_ATMEL))
/* page number goes into the high bits, byte offset stays in the low bits */
10378 addr = ((addr / tp->nvram_pagesize) <<
10379 ATMEL_AT45DB0X1B_PAGE_POS) +
10380 (addr % tp->nvram_pagesize);
/*
 * tg3_nvram_logical_addr() - inverse of tg3_nvram_phys_addr():
 * convert a physical (page, offset) Atmel flash address back to the
 * linear NVRAM offset.
 *
 * Guarded by the same flag combination as the forward translation;
 * the final return line is missing from this chunk (TODO confirm).
 */
10385 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10387 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10388 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10389 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10390 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10391 (tp->nvram_jedecnum == JEDEC_ATMEL))
/* page * pagesize + offset-in-page */
10393 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10394 tp->nvram_pagesize) +
10395 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
/*
 * tg3_nvram_read() - read one 32-bit word from NVRAM at @offset.
 *
 * Falls back to the serial-EEPROM path when no NVRAM block is present.
 * Otherwise: translate the offset, bound-check it against
 * NVRAM_ADDR_MSK, take the NVRAM lock, enable access, issue a
 * FIRST|LAST single-word read command and byte-swap the result from
 * NVRAM_RDDATA into *val.  Lock/access are released before return.
 *
 * NOTE(review): error-return lines between the visible statements are
 * missing from this chunk (e.g. after the lock and bound check).
 */
10400 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10404 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10405 return tg3_nvram_read_using_eeprom(tp, offset, val);
10407 offset = tg3_nvram_phys_addr(tp, offset);
10409 if (offset > NVRAM_ADDR_MSK)
10412 ret = tg3_nvram_lock(tp);
10416 tg3_enable_nvram_access(tp);
10418 tw32(NVRAM_ADDR, offset);
10419 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10420 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
/* Hardware returns the word byte-swapped relative to CPU order here. */
10423 *val = swab32(tr32(NVRAM_RDDATA));
10425 tg3_disable_nvram_access(tp);
10427 tg3_nvram_unlock(tp);
/*
 * tg3_nvram_read_le() - read one word and store it little-endian.
 * Thin wrapper over tg3_nvram_read(); returns its result code.
 */
10432 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10435 int res = tg3_nvram_read(tp, offset, &v);
10437 *val = cpu_to_le32(v);
/*
 * tg3_nvram_read_swab() - read one word and byte-swap it.
 * Thin wrapper over tg3_nvram_read(); returns its result code.
 */
10441 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10446 err = tg3_nvram_read(tp, offset, &tmp);
10447 *val = swab32(tmp);
/*
 * tg3_nvram_write_block_using_eeprom() - write @len bytes from @buf to
 * the serial EEPROM, one 32-bit word at a time.
 *
 * For each word: load GRC_EEPROM_DATA, clear any stale COMPLETE bit,
 * program address + START|WRITE, then poll up to 1000 iterations for
 * EEPROM_ADDR_COMPLETE.
 *
 * NOTE(review): the addr computation, poll delay, and error-return
 * lines are missing from this chunk (numbering gaps) - verify against
 * the full file.  Caller presumably guarantees dword-aligned
 * offset/len, as with the sibling write helpers - TODO confirm.
 */
10451 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10452 u32 offset, u32 len, u8 *buf)
10457 for (i = 0; i < len; i += 4) {
10463 memcpy(&data, buf + i, 4);
10465 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10467 val = tr32(GRC_EEPROM_ADDR);
/* Write-1-to-clear any previous COMPLETE status. */
10468 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10470 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10472 tw32(GRC_EEPROM_ADDR, val |
10473 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10474 (addr & EEPROM_ADDR_ADDR_MASK) |
10475 EEPROM_ADDR_START |
10476 EEPROM_ADDR_WRITE);
/* Poll for completion of this word's write transaction. */
10478 for (j = 0; j < 1000; j++) {
10479 val = tr32(GRC_EEPROM_ADDR);
10481 if (val & EEPROM_ADDR_COMPLETE)
10485 if (!(val & EEPROM_ADDR_COMPLETE)) {
10494 /* offset and length are dword aligned */
/*
 * tg3_nvram_write_block_unbuffered() - read-modify-write path for
 * unbuffered flash: the part can only be written a full erase page at
 * a time, so for each affected page we read the whole page into a
 * kmalloc'd bounce buffer, merge in the caller's data, erase the page,
 * and write it back word by word (FIRST on the first word, LAST on the
 * final word of the page).  Finishes with a WRDI (write-disable)
 * command.
 *
 * NOTE(review): the loop heads, size computation, kfree/cleanup and
 * return paths are missing from this chunk (numbering gaps throughout)
 * - verify the control flow against the full file.
 */
10495 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10499 u32 pagesize = tp->nvram_pagesize;
10500 u32 pagemask = pagesize - 1;
/* Bounce buffer holding one full flash page. */
10504 tmp = kmalloc(pagesize, GFP_KERNEL);
10510 u32 phy_addr, page_off, size;
/* Round the offset down to the start of its page. */
10512 phy_addr = offset & ~pagemask;
/* Read the whole page so untouched words survive the erase. */
10514 for (j = 0; j < pagesize; j += 4) {
10515 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10516 (__le32 *) (tmp + j))))
10522 page_off = offset & pagemask;
/* Merge caller data into the page image. */
10529 memcpy(tmp + page_off, buf, size);
10531 offset = offset + (pagesize - page_off);
10533 tg3_enable_nvram_access(tp);
10536 * Before we can erase the flash page, we need
10537 * to issue a special "write enable" command.
10539 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10541 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10544 /* Erase the target page */
10545 tw32(NVRAM_ADDR, phy_addr);
10547 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10548 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10550 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10553 /* Issue another write enable to start the write. */
10554 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10556 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Write the merged page image back one word at a time. */
10559 for (j = 0; j < pagesize; j += 4) {
10562 data = *((__be32 *) (tmp + j));
10563 /* swab32(le32_to_cpu(data)), actually */
10564 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10566 tw32(NVRAM_ADDR, phy_addr + j);
10568 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10572 nvram_cmd |= NVRAM_CMD_FIRST;
10573 else if (j == (pagesize - 4))
10574 nvram_cmd |= NVRAM_CMD_LAST;
10576 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Leave the part write-disabled; result intentionally ignored. */
10583 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10584 tg3_nvram_exec_cmd(tp, nvram_cmd);
10591 /* offset and length are dword aligned */
/*
 * tg3_nvram_write_block_buffered() - write path for buffered NVRAM
 * parts (and plain EEPROMs): no page erase needed, one command per
 * 32-bit word.  FIRST is set on the first word of a page (or of the
 * transfer), LAST on the final word of a page or of the transfer.
 * ST parts on pre-5752-class chips additionally require a WREN
 * command before each FIRST write.
 */
10592 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10597 for (i = 0; i < len; i += 4, offset += 4) {
10598 u32 page_off, phy_addr, nvram_cmd;
10601 memcpy(&data, buf + i, 4);
10602 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10604 page_off = offset % tp->nvram_pagesize;
10606 phy_addr = tg3_nvram_phys_addr(tp, offset);
10608 tw32(NVRAM_ADDR, phy_addr);
10610 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10612 if ((page_off == 0) || (i == 0))
10613 nvram_cmd |= NVRAM_CMD_FIRST;
10614 if (page_off == (tp->nvram_pagesize - 4))
10615 nvram_cmd |= NVRAM_CMD_LAST;
10617 if (i == (len - 4))
10618 nvram_cmd |= NVRAM_CMD_LAST;
/* Older chips with ST flash need an explicit write-enable first. */
10620 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10621 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10622 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10623 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10624 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10625 (tp->nvram_jedecnum == JEDEC_ST) &&
10626 (nvram_cmd & NVRAM_CMD_FIRST)) {
10628 if ((ret = tg3_nvram_exec_cmd(tp,
10629 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10634 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10635 /* We always do complete word writes to eeprom. */
10636 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10639 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10645 /* offset and length are dword aligned */
/*
 * tg3_nvram_write_block() - top-level NVRAM write entry point.
 *
 * Temporarily drops GPIO_OUTPUT1 to disable hardware write protection
 * when TG3_FLAG_EEPROM_WRITE_PROT is set (restored at the end), then
 * dispatches: no NVRAM block -> serial-EEPROM path; buffered or
 * non-flash -> buffered path; otherwise the unbuffered (page
 * read/erase/rewrite) path.  NVRAM lock and access-enable bracket the
 * non-EEPROM paths, with GRC_MODE_NVRAM_WR_ENABLE set around the write.
 *
 * NOTE(review): udelay/return lines and some braces are missing from
 * this chunk (numbering gaps) - verify against the full file.
 */
10646 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
/* Deassert write-protect GPIO for the duration of the write. */
10650 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10651 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10652 ~GRC_LCLCTRL_GPIO_OUTPUT1);
10656 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10657 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10662 ret = tg3_nvram_lock(tp);
10666 tg3_enable_nvram_access(tp);
/* 5750+ without protected NVRAM needs this magic write-enable poke. */
10667 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10668 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10669 tw32(NVRAM_WRITE1, 0x406);
10671 grc_mode = tr32(GRC_MODE);
10672 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10674 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10675 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10677 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10681 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10685 grc_mode = tr32(GRC_MODE);
10686 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10688 tg3_disable_nvram_access(tp);
10689 tg3_nvram_unlock(tp);
/* Restore the write-protect GPIO state. */
10692 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10693 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/*
 * Maps a PCI subsystem (vendor, device) pair to the PHY chip used on
 * that board.  NOTE(review): the phy_id member and closing brace are
 * missing from this chunk (numbering jumps past 10702) - verify
 * against the full file.
 */
10700 struct subsys_tbl_ent {
10701 u16 subsys_vendor, subsys_devid;
/*
 * Hard-coded board table: when the EEPROM carries no PHY id, the
 * subsystem (vendor, device) pair identifies the board and hence the
 * PHY.  A phy_id of 0 marks serdes (fiber) boards with no copper PHY.
 */
10705 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10706 /* Broadcom boards. */
10707 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10708 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10709 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10710 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10711 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10712 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10713 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10714 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10715 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10716 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10717 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
/* 3com boards. */
10720 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10721 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10722 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10723 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10724 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
/* Dell boards. */
10727 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10728 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10729 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10730 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10732 /* Compaq boards. */
10733 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10734 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10735 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10736 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10737 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
/* IBM boards. */
10740 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/*
 * lookup_by_subsys() - find this device's board entry in
 * subsys_id_to_phy_id[] by matching the PCI subsystem vendor/device
 * pair.  Presumably returns NULL when no entry matches (the final
 * return line is missing from this chunk - TODO confirm).
 */
10743 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10747 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10748 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10749 tp->pdev->subsystem_vendor) &&
10750 (subsys_id_to_phy_id[i].subsys_devid ==
10751 tp->pdev->subsystem_device))
10752 return &subsys_id_to_phy_id[i];
/*
 * tg3_get_eeprom_hw_cfg() - read board configuration from NIC SRAM.
 *
 * Forces the device to D0, re-enables indirect register access and the
 * memory arbiter (all prerequisites for SRAM reads), then parses the
 * NIC_SRAM_DATA_* block when its magic signature is present: PHY id,
 * serdes vs copper, LED mode, write-protect, ASF/APE enables, WoL
 * capability, and various workaround flags.  5906 has an extra
 * VCPU-shadow path checked before the SRAM signature.
 *
 * NOTE(review): many lines are missing from this chunk (switch heads,
 * braces, delays); the control flow commented here follows the visible
 * fragments only - verify against the full file.
 */
10757 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10762 /* On some early chips the SRAM cannot be accessed in D3hot state,
10763 * so need make sure we're in D0.
10765 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10766 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10767 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10770 /* Make sure register accesses (indirect or otherwise)
10771 * will function correctly.
10773 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10774 tp->misc_host_ctrl);
10776 /* The memory arbiter has to be enabled in order for SRAM accesses
10777 * to succeed. Normally on powerup the tg3 chip firmware will make
10778 * sure it is enabled, but other entities such as system netboot
10779 * code might disable it.
10781 val = tr32(MEMARB_MODE);
10782 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
/* Defaults until the SRAM config (if any) overrides them. */
10784 tp->phy_id = PHY_ID_INVALID;
10785 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10787 /* Assume an onboard device and WOL capable by default. */
10788 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
/* 5906: config shadow lives in VCPU registers, not SRAM. */
10790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10791 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10792 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10793 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10795 val = tr32(VCPU_CFGSHDW);
10796 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10797 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10798 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10799 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10800 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
/* Only trust the SRAM block if the magic signature is present. */
10804 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10805 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10806 u32 nic_cfg, led_cfg;
10807 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10808 int eeprom_phy_serdes = 0;
10810 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10811 tp->nic_sram_data_cfg = nic_cfg;
/* CFG_2 word only exists on newer bootcode versions. */
10813 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10814 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10815 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10816 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10817 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10818 (ver > 0) && (ver < 0x100))
10819 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10821 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10822 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10823 eeprom_phy_serdes = 1;
/* Repack the SRAM-format PHY id into the driver's layout. */
10825 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10826 if (nic_phy_id != 0) {
10827 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10828 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10830 eeprom_phy_id = (id1 >> 16) << 10;
10831 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10832 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10836 tp->phy_id = eeprom_phy_id;
10837 if (eeprom_phy_serdes) {
10838 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10839 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10841 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* LED mode: 5750+ uses the extended field in cfg2. */
10844 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10845 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10846 SHASTA_EXT_LED_MODE_MASK);
10848 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10852 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10853 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10856 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10857 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10860 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10861 tp->led_ctrl = LED_CTRL_MODE_MAC;
10863 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10864 * read on some older 5700/5701 bootcode.
10866 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10868 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10870 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10874 case SHASTA_EXT_LED_SHARED:
10875 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10876 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10877 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10878 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10879 LED_CTRL_MODE_PHY_2);
10882 case SHASTA_EXT_LED_MAC:
10883 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10886 case SHASTA_EXT_LED_COMBO:
10887 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10888 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10889 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10890 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards always use PHY_2 LED mode. */
10895 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10896 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10897 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10898 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10900 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
10901 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10903 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10904 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
/* Two Arima boards report WP but must stay writable. */
10905 if ((tp->pdev->subsystem_vendor ==
10906 PCI_VENDOR_ID_ARIMA) &&
10907 (tp->pdev->subsystem_device == 0x205a ||
10908 tp->pdev->subsystem_device == 0x2063))
10909 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10911 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10912 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10915 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10916 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10917 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10918 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10920 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10921 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
/* Serdes boards without fiber-WoL support lose WoL capability. */
10922 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10923 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10924 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10926 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10927 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10928 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10930 if (cfg2 & (1 << 17))
10931 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10933 /* serdes signal pre-emphasis in register 0x590 set by */
10934 /* bootcode if bit 18 is set */
10935 if (cfg2 & (1 << 18))
10936 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10938 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10941 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10942 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10943 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
/*
 * tg3_issue_otp_command() - run one OTP controller command.
 *
 * Pulses CMD_START then re-writes the command, and polls OTP_STATUS
 * (up to 100 iterations, ~1 ms total per the comment) for CMD_DONE.
 * Returns 0 on completion, -EBUSY on timeout.
 */
10948 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10953 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10954 tw32(OTP_CTRL, cmd);
10956 /* Wait for up to 1 ms for command to execute. */
10957 for (i = 0; i < 100; i++) {
10958 val = tr32(OTP_STATUS);
10959 if (val & OTP_STATUS_CMD_DONE)
10964 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10967 /* Read the gphy configuration from the OTP region of the chip. The gphy
10968 * configuration is a 32-bit value that straddles the alignment boundary.
10969 * We do two 32-bit reads and then shift and merge the results.
/* Returns the merged 32-bit gphy config, with early exits (0, per the
 * visible merge expression) when an OTP command fails - the bare
 * return lines are missing from this chunk (TODO confirm). */
10971 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
10973 u32 bhalf_otp, thalf_otp;
/* Route OTP access through the GRC register window. */
10975 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
10977 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
10980 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
10982 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10985 thalf_otp = tr32(OTP_READ_DATA);
10987 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
10989 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
10992 bhalf_otp = tr32(OTP_READ_DATA);
/* Low half of the first word + high half of the second. */
10994 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/*
 * tg3_phy_probe() - identify and initially configure the PHY.
 *
 * Reads MII_PHYSID1/2 (unless ASF/APE firmware owns the PHY, in which
 * case the hardware id is treated as invalid) and accepts the hardware
 * id when it is a known PHY; otherwise falls back to the id from
 * tg3_get_eeprom_hw_cfg() and finally the hard-coded subsystem table.
 * For copper PHYs not managed by firmware, resets the PHY and makes
 * sure full 10/100(/1000) autonegotiation advertisement is set, then
 * runs the 5401 DSP init when applicable and restricts the serdes /
 * 10-100-only advertising masks.
 *
 * NOTE(review): several lines are missing from this chunk (the
 * skip_phy_reset label, some braces, error returns); the structure
 * described here follows the visible fragments - verify against the
 * full file.
 */
10997 static int __devinit tg3_phy_probe(struct tg3 *tp)
10999 u32 hw_phy_id_1, hw_phy_id_2;
11000 u32 hw_phy_id, hw_phy_id_masked;
11003 /* Reading the PHY ID register can conflict with ASF
11004 * firwmare access to the PHY hardware.
11007 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11008 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11009 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11011 /* Now read the physical PHY_ID from the chip and verify
11012 * that it is sane. If it doesn't look good, we fall back
11013 * to either the hard-coded table based PHY_ID and failing
11014 * that the value found in the eeprom area.
11016 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11017 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Repack MII id registers into the driver's PHY id layout. */
11019 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11020 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11021 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11023 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11026 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11027 tp->phy_id = hw_phy_id;
11028 if (hw_phy_id_masked == PHY_ID_BCM8002)
11029 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11031 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11033 if (tp->phy_id != PHY_ID_INVALID) {
11034 /* Do nothing, phy ID already set up in
11035 * tg3_get_eeprom_hw_cfg().
11038 struct subsys_tbl_ent *p;
11040 /* No eeprom signature? Try the hardcoded
11041 * subsys device table.
11043 p = lookup_by_subsys(tp);
11047 tp->phy_id = p->phy_id;
11049 tp->phy_id == PHY_ID_BCM8002)
11050 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY fully owned by the driver: set up autoneg. */
11054 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11055 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11056 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11057 u32 bmsr, adv_reg, tg3_ctrl, mask;
/* BMSR is latched; read twice so link status is current. */
11059 tg3_readphy(tp, MII_BMSR, &bmsr);
11060 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11061 (bmsr & BMSR_LSTATUS))
11062 goto skip_phy_reset;
11064 err = tg3_phy_reset(tp);
11068 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11069 ADVERTISE_100HALF | ADVERTISE_100FULL |
11070 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11072 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11073 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11074 MII_TG3_CTRL_ADV_1000_FULL);
/* Early 5701 steppings must force master mode. */
11075 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11076 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11077 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11078 MII_TG3_CTRL_ENABLE_AS_MASTER);
11081 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11082 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11083 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
/* If not already advertising everything, rewrite and restart AN. */
11084 if (!tg3_copper_is_advertising_all(tp, mask)) {
11085 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11087 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11088 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11090 tg3_writephy(tp, MII_BMCR,
11091 BMCR_ANENABLE | BMCR_ANRESTART);
11093 tg3_phy_set_wirespeed(tp);
11095 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11096 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11097 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* The 5401 needs its DSP coefficients loaded after reset. */
11101 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11102 err = tg3_init_5401phy_dsp(tp);
11107 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11108 err = tg3_init_5401phy_dsp(tp);
11111 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11112 tp->link_config.advertising =
11113 (ADVERTISED_1000baseT_Half |
11114 ADVERTISED_1000baseT_Full |
11115 ADVERTISED_Autoneg |
11117 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11118 tp->link_config.advertising &=
11119 ~(ADVERTISED_1000baseT_Half |
11120 ADVERTISED_1000baseT_Full);
/*
 * tg3_read_partno() - extract the board part number from VPD.
 *
 * Reads 256 bytes of VPD either from NVRAM (when the EEPROM magic is
 * present, at offset 0x100) or through the PCI VPD capability, then
 * walks the VPD structures looking for the read-only "PN" keyword and
 * copies up to 24 bytes into tp->board_part_number.  Falls back to
 * "BCM95906" for 5906 parts, else the literal "none".
 *
 * NOTE(review): this chunk is missing several lines (the VPD address
 * write setup, loop-exit breaks, and some bound checks appear only
 * partially); verify parsing details against the full file.
 */
11125 static void __devinit tg3_read_partno(struct tg3 *tp)
11127 unsigned char vpd_data[256];
11131 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11132 goto out_not_found;
/* NVRAM path: VPD lives at offset 0x100. */
11134 if (magic == TG3_EEPROM_MAGIC) {
11135 for (i = 0; i < 256; i += 4) {
11138 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11139 goto out_not_found;
/* Unpack the little-endian word into the byte buffer. */
11141 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11142 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11143 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11144 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
/* PCI-config path: read VPD via the capability registers. */
11149 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11150 for (i = 0; i < 256; i += 4) {
11155 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
/* Poll bit 15 (data-ready flag) with a bounded retry count. */
11157 while (j++ < 100) {
11158 pci_read_config_word(tp->pdev, vpd_cap +
11159 PCI_VPD_ADDR, &tmp16);
11160 if (tmp16 & 0x8000)
11164 if (!(tmp16 & 0x8000))
11165 goto out_not_found;
11167 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11169 v = cpu_to_le32(tmp);
11170 memcpy(&vpd_data[i], &v, 4);
11174 /* Now parse and find the part number. */
11175 for (i = 0; i < 254; ) {
11176 unsigned char val = vpd_data[i];
11177 unsigned int block_end;
/* 0x82 = identifier string tag, 0x91 = read-only data tag. */
11179 if (val == 0x82 || val == 0x91) {
11182 (vpd_data[i + 2] << 8)));
11187 goto out_not_found;
11189 block_end = (i + 3 +
11191 (vpd_data[i + 2] << 8)));
11194 if (block_end > 256)
11195 goto out_not_found;
/* Scan the read-only block for the "PN" keyword. */
11197 while (i < (block_end - 2)) {
11198 if (vpd_data[i + 0] == 'P' &&
11199 vpd_data[i + 1] == 'N') {
11200 int partno_len = vpd_data[i + 2];
11203 if (partno_len > 24 || (partno_len + i) > 256)
11204 goto out_not_found;
11206 memcpy(tp->board_part_number,
11207 &vpd_data[i], partno_len);
/* Skip this keyword entry: 3-byte header + data. */
11212 i += 3 + vpd_data[i + 2];
11215 /* Part number not found. */
11216 goto out_not_found;
11220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11221 strcpy(tp->board_part_number, "BCM95906");
11223 strcpy(tp->board_part_number, "none");
/*
 * tg3_fw_img_is_valid() - sanity-check a firmware image header in
 * NVRAM: the first word's top bits must be 0x0c000000 and the second
 * word must also be readable (its check continues on a line missing
 * from this chunk).  Presumably returns nonzero when valid - TODO
 * confirm the return lines against the full file.
 */
11226 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11230 if (tg3_nvram_read_swab(tp, offset, &val) ||
11231 (val & 0xfc000000) != 0x0c000000 ||
11232 tg3_nvram_read_swab(tp, offset + 4, &val) ||
/*
 * tg3_read_fw_ver() - build the firmware version string in tp->fw_ver.
 *
 * Locates the bootcode version string via the NVRAM header words at
 * 0x4/0xc, validates the image, and copies up to 16 bytes into
 * tp->fw_ver.  When ASF firmware is active (and APE is not), it then
 * scans the NVM directory for the ASF-init entry and appends ", "
 * plus 16 more bytes of that image's version, always NUL-terminating
 * at TG3_VER_SIZE - 1.
 *
 * NOTE(review): early-return lines after failed reads and some loop
 * breaks are missing from this chunk - verify against the full file.
 */
11239 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11241 u32 val, offset, start;
11245 if (tg3_nvram_read_swab(tp, 0, &val))
11248 if (val != TG3_EEPROM_MAGIC)
11251 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11252 tg3_nvram_read_swab(tp, 0x4, &start))
11255 offset = tg3_nvram_logical_addr(tp, offset);
11257 if (!tg3_fw_img_is_valid(tp, offset) ||
11258 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
/* ver_offset is relative to the image load address (start). */
11261 offset = offset + ver_offset - start;
11262 for (i = 0; i < 16; i += 4) {
11264 if (tg3_nvram_read_le(tp, offset + i, &v))
11267 memcpy(tp->fw_ver + i, &v, 4);
/* ASF version is only appended when ASF runs without APE. */
11270 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11271 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
/* Find the ASF-init entry in the NVM directory. */
11274 for (offset = TG3_NVM_DIR_START;
11275 offset < TG3_NVM_DIR_END;
11276 offset += TG3_NVM_DIRENT_SIZE) {
11277 if (tg3_nvram_read_swab(tp, offset, &val))
11280 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11284 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed load address. */
11287 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11288 start = 0x08000000;
11289 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11292 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11293 !tg3_fw_img_is_valid(tp, offset) ||
11294 tg3_nvram_read_swab(tp, offset + 8, &val))
11297 offset += val - start;
11299 bcnt = strlen(tp->fw_ver);
11301 tp->fw_ver[bcnt++] = ',';
11302 tp->fw_ver[bcnt++] = ' ';
11304 for (i = 0; i < 4; i++) {
11306 if (tg3_nvram_read_le(tp, offset, &v))
11309 offset += sizeof(v);
/* Truncate the final word if it would overflow the buffer. */
11311 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11312 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11316 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11320 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11323 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11325 static int __devinit tg3_get_invariants(struct tg3 *tp)
11327 static struct pci_device_id write_reorder_chipsets[] = {
11328 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11329 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11330 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11331 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11332 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11333 PCI_DEVICE_ID_VIA_8385_0) },
11337 u32 cacheline_sz_reg;
11338 u32 pci_state_reg, grc_misc_cfg;
11343 /* Force memory write invalidate off. If we leave it on,
11344 * then on 5700_BX chips we have to enable a workaround.
11345 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11346 * to match the cacheline size. The Broadcom driver have this
11347 * workaround but turns MWI off all the times so never uses
11348 * it. This seems to suggest that the workaround is insufficient.
11350 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11351 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11352 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11354 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11355 * has the register indirect write enable bit set before
11356 * we try to access any of the MMIO registers. It is also
11357 * critical that the PCI-X hw workaround situation is decided
11358 * before that as well.
11360 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11363 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11364 MISC_HOST_CTRL_CHIPREV_SHIFT);
11365 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11366 u32 prod_id_asic_rev;
11368 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11369 &prod_id_asic_rev);
11370 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11373 /* Wrong chip ID in 5752 A0. This code can be removed later
11374 * as A0 is not in production.
11376 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11377 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11379 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11380 * we need to disable memory and use config. cycles
11381 * only to access all registers. The 5702/03 chips
11382 * can mistakenly decode the special cycles from the
11383 * ICH chipsets as memory write cycles, causing corruption
11384 * of register and memory space. Only certain ICH bridges
11385 * will drive special cycles with non-zero data during the
11386 * address phase which can fall within the 5703's address
11387 * range. This is not an ICH bug as the PCI spec allows
11388 * non-zero address during special cycles. However, only
11389 * these ICH bridges are known to drive non-zero addresses
11390 * during special cycles.
11392 * Since special cycles do not cross PCI bridges, we only
11393 * enable this workaround if the 5703 is on the secondary
11394 * bus of these ICH bridges.
11396 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11397 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11398 static struct tg3_dev_id {
11402 } ich_chipsets[] = {
11403 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11405 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11407 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11409 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11413 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11414 struct pci_dev *bridge = NULL;
11416 while (pci_id->vendor != 0) {
11417 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11423 if (pci_id->rev != PCI_ANY_ID) {
11424 if (bridge->revision > pci_id->rev)
11427 if (bridge->subordinate &&
11428 (bridge->subordinate->number ==
11429 tp->pdev->bus->number)) {
11431 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11432 pci_dev_put(bridge);
11438 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11439 static struct tg3_dev_id {
11442 } bridge_chipsets[] = {
11443 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11444 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11447 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11448 struct pci_dev *bridge = NULL;
11450 while (pci_id->vendor != 0) {
11451 bridge = pci_get_device(pci_id->vendor,
11458 if (bridge->subordinate &&
11459 (bridge->subordinate->number <=
11460 tp->pdev->bus->number) &&
11461 (bridge->subordinate->subordinate >=
11462 tp->pdev->bus->number)) {
11463 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11464 pci_dev_put(bridge);
11470 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11471 * DMA addresses > 40-bit. This bridge may have other additional
11472 * 57xx devices behind it in some 4-port NIC designs for example.
11473 * Any tg3 device found behind the bridge will also need the 40-bit
11476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11478 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11479 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11480 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11483 struct pci_dev *bridge = NULL;
11486 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11487 PCI_DEVICE_ID_SERVERWORKS_EPB,
11489 if (bridge && bridge->subordinate &&
11490 (bridge->subordinate->number <=
11491 tp->pdev->bus->number) &&
11492 (bridge->subordinate->subordinate >=
11493 tp->pdev->bus->number)) {
11494 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11495 pci_dev_put(bridge);
11501 /* Initialize misc host control in PCI block. */
11502 tp->misc_host_ctrl |= (misc_ctrl_reg &
11503 MISC_HOST_CTRL_CHIPREV);
11504 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11505 tp->misc_host_ctrl);
11507 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11508 &cacheline_sz_reg);
11510 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11511 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11512 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11513 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11515 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11516 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11517 tp->pdev_peer = tg3_find_peer(tp);
11519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11524 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11526 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11527 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11529 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11530 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11531 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11533 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11534 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11535 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11536 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11537 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11538 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11539 tp->pdev_peer == tp->pdev))
11540 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11546 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11547 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11548 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11550 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11551 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11553 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11554 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11558 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11559 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11560 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11562 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11563 if (pcie_cap != 0) {
11564 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11566 pcie_set_readrq(tp->pdev, 4096);
11568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11571 pci_read_config_word(tp->pdev,
11572 pcie_cap + PCI_EXP_LNKCTL,
11574 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11575 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11579 /* If we have an AMD 762 or VIA K8T800 chipset, write
11580 * reordering to the mailbox registers done by the host
11581 * controller can cause major troubles. We read back from
11582 * every mailbox register write to force the writes to be
11583 * posted to the chip in order.
11585 if (pci_dev_present(write_reorder_chipsets) &&
11586 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11587 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11590 tp->pci_lat_timer < 64) {
11591 tp->pci_lat_timer = 64;
11593 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11594 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11595 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11596 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11598 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11602 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11603 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11604 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11605 if (!tp->pcix_cap) {
11606 printk(KERN_ERR PFX "Cannot find PCI-X "
11607 "capability, aborting.\n");
11612 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11615 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11616 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11618 /* If this is a 5700 BX chipset, and we are in PCI-X
11619 * mode, enable register write workaround.
11621 * The workaround is to use indirect register accesses
11622 * for all chip writes not to mailbox registers.
11624 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11627 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11629 /* The chip can have it's power management PCI config
11630 * space registers clobbered due to this bug.
11631 * So explicitly force the chip into D0 here.
11633 pci_read_config_dword(tp->pdev,
11634 tp->pm_cap + PCI_PM_CTRL,
11636 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11637 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11638 pci_write_config_dword(tp->pdev,
11639 tp->pm_cap + PCI_PM_CTRL,
11642 /* Also, force SERR#/PERR# in PCI command. */
11643 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11644 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11645 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11649 /* 5700 BX chips need to have their TX producer index mailboxes
11650 * written twice to workaround a bug.
11652 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11653 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11655 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11656 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11657 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11658 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11660 /* Chip-specific fixup from Broadcom driver */
11661 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11662 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11663 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11664 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11667 /* Default fast path register access methods */
11668 tp->read32 = tg3_read32;
11669 tp->write32 = tg3_write32;
11670 tp->read32_mbox = tg3_read32;
11671 tp->write32_mbox = tg3_write32;
11672 tp->write32_tx_mbox = tg3_write32;
11673 tp->write32_rx_mbox = tg3_write32;
11675 /* Various workaround register access methods */
11676 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11677 tp->write32 = tg3_write_indirect_reg32;
11678 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11679 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11680 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11682 * Back to back register writes can cause problems on these
11683 * chips, the workaround is to read back all reg writes
11684 * except those to mailbox regs.
11686 * See tg3_write_indirect_reg32().
11688 tp->write32 = tg3_write_flush_reg32;
11692 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11693 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11694 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11695 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11696 tp->write32_rx_mbox = tg3_write_flush_reg32;
11699 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11700 tp->read32 = tg3_read_indirect_reg32;
11701 tp->write32 = tg3_write_indirect_reg32;
11702 tp->read32_mbox = tg3_read_indirect_mbox;
11703 tp->write32_mbox = tg3_write_indirect_mbox;
11704 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11705 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11710 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11711 pci_cmd &= ~PCI_COMMAND_MEMORY;
11712 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11715 tp->read32_mbox = tg3_read32_mbox_5906;
11716 tp->write32_mbox = tg3_write32_mbox_5906;
11717 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11718 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11721 if (tp->write32 == tg3_write_indirect_reg32 ||
11722 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11723 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11725 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11727 /* Get eeprom hw config before calling tg3_set_power_state().
11728 * In particular, the TG3_FLG2_IS_NIC flag must be
11729 * determined before calling tg3_set_power_state() so that
11730 * we know whether or not to switch out of Vaux power.
11731 * When the flag is set, it means that GPIO1 is used for eeprom
11732 * write protect and also implies that it is a LOM where GPIOs
11733 * are not used to switch power.
11735 tg3_get_eeprom_hw_cfg(tp);
11737 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11738 /* Allow reads and writes to the
11739 * APE register and memory space.
11741 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11742 PCISTATE_ALLOW_APE_SHMEM_WR;
11743 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11749 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11751 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11752 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11753 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11754 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11755 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11758 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11759 * GPIO1 driven high will bring 5700's external PHY out of reset.
11760 * It is also used as eeprom write protect on LOMs.
11762 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11763 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11764 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11765 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11766 GRC_LCLCTRL_GPIO_OUTPUT1);
11767 /* Unused GPIO3 must be driven as output on 5752 because there
11768 * are no pull-up resistors on unused GPIO pins.
11770 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11771 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11773 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11774 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11776 /* Force the chip into D0. */
11777 err = tg3_set_power_state(tp, PCI_D0);
11779 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11780 pci_name(tp->pdev));
11784 /* 5700 B0 chips do not support checksumming correctly due
11785 * to hardware bugs.
11787 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11788 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11790 /* Derive initial jumbo mode from MTU assigned in
11791 * ether_setup() via the alloc_etherdev() call
11793 if (tp->dev->mtu > ETH_DATA_LEN &&
11794 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11795 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11797 /* Determine WakeOnLan speed to use. */
11798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11799 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11800 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11801 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11802 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11804 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11807 /* A few boards don't want Ethernet@WireSpeed phy feature */
11808 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11809 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11810 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11811 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11812 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11813 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11814 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11816 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11817 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11818 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11819 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11820 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11822 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11824 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11826 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11827 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11828 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11829 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11830 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11831 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11832 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11833 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11837 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11838 tp->phy_otp = tg3_read_otp_phycfg(tp);
11839 if (tp->phy_otp == 0)
11840 tp->phy_otp = TG3_OTP_DEFAULT;
11843 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
11844 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11846 tp->mi_mode = MAC_MI_MODE_BASE;
11848 tp->coalesce_mode = 0;
11849 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11850 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11851 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11853 /* Initialize MAC MI mode, polling disabled. */
11854 tw32_f(MAC_MI_MODE, tp->mi_mode);
11857 /* Initialize data/descriptor byte/word swapping. */
11858 val = tr32(GRC_MODE);
11859 val &= GRC_MODE_HOST_STACKUP;
11860 tw32(GRC_MODE, val | tp->grc_mode);
11862 tg3_switch_clocks(tp);
11864 /* Clear this out for sanity. */
11865 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11867 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11869 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11870 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11871 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11873 if (chiprevid == CHIPREV_ID_5701_A0 ||
11874 chiprevid == CHIPREV_ID_5701_B0 ||
11875 chiprevid == CHIPREV_ID_5701_B2 ||
11876 chiprevid == CHIPREV_ID_5701_B5) {
11877 void __iomem *sram_base;
11879 /* Write some dummy words into the SRAM status block
11880 * area, see if it reads back correctly. If the return
11881 * value is bad, force enable the PCIX workaround.
11883 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11885 writel(0x00000000, sram_base);
11886 writel(0x00000000, sram_base + 4);
11887 writel(0xffffffff, sram_base + 4);
11888 if (readl(sram_base) != 0x00000000)
11889 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11894 tg3_nvram_init(tp);
11896 grc_misc_cfg = tr32(GRC_MISC_CFG);
11897 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11900 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11901 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11902 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11904 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11905 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11906 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11907 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11908 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11909 HOSTCC_MODE_CLRTICK_TXBD);
11911 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11912 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11913 tp->misc_host_ctrl);
11916 /* these are limited to 10/100 only */
11917 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11918 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11919 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11920 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11921 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11922 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11923 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11924 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11925 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11926 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11927 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11929 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11931 err = tg3_phy_probe(tp);
11933 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11934 pci_name(tp->pdev), err);
11935 /* ... but do not return immediately ... */
11938 tg3_read_partno(tp);
11939 tg3_read_fw_ver(tp);
11941 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11942 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11945 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11947 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11950 /* 5700 {AX,BX} chips have a broken status block link
11951 * change bit implementation, so we must use the
11952 * status register in those cases.
11954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11955 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11957 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11959 /* The led_ctrl is set during tg3_phy_probe, here we might
11960 * have to force the link status polling mechanism based
11961 * upon subsystem IDs.
11963 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11965 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11966 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11967 TG3_FLAG_USE_LINKCHG_REG);
11970 /* For all SERDES we poll the MAC status register. */
11971 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11972 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11974 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11976 /* All chips before 5787 can get confused if TX buffers
11977 * straddle the 4GB address boundary in some cases.
11979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11983 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11984 tp->dev->hard_start_xmit = tg3_start_xmit;
11986 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11990 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11993 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11995 /* Increment the rx prod index on the rx std ring by at most
11996 * 8 for these chips to workaround hw errata.
11998 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12001 tp->rx_std_max_post = 8;
12003 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12004 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12005 PCIE_PWR_MGMT_L1_THRESH_MSK;
12010 #ifdef CONFIG_SPARC
/*
 * tg3_get_macaddr_sparc() - on SPARC, try to obtain the NIC's MAC
 * address from the OpenFirmware device tree ("local-mac-address"
 * property of this PCI device's node) and install it as both the
 * current and permanent address of the net_device.
 *
 * NOTE(review): the return statements fall outside this excerpt
 * (upstream returns 0 on success, negative when the property is
 * missing) -- confirm against the full source.
 */
12011 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12013 struct net_device *dev = tp->dev;
12014 struct pci_dev *pdev = tp->pdev;
/* Map this PCI function to its OpenFirmware device-tree node. */
12015 struct device_node *dp = pci_device_to_OF_node(pdev);
12016 const unsigned char *addr;
12019 addr = of_get_property(dp, "local-mac-address", &len);
/* Only accept a property that is exactly 6 (ETH_ALEN) bytes long. */
12020 if (addr && len == 6) {
12021 memcpy(dev->dev_addr, addr, 6);
/* Mirror the firmware-provided address as the permanent address. */
12022 memcpy(dev->perm_addr, dev->dev_addr, 6);
/*
 * tg3_get_default_macaddr_sparc() - last-resort MAC address source on
 * SPARC: copy the machine-wide address stored in the IDPROM into both
 * dev_addr and perm_addr.  Used when neither OpenFirmware, NIC SRAM,
 * NVRAM nor the MAC registers yielded a valid address.
 */
12028 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12030 struct net_device *dev = tp->dev;
/* idprom->id_ethaddr is the system's factory-programmed MAC address. */
12032 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12033 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/*
 * tg3_get_device_address() - determine the interface's MAC address,
 * trying sources in decreasing order of preference: OpenFirmware
 * (SPARC only), the bootcode mailbox in NIC SRAM, NVRAM, and finally
 * the MAC_ADDR_0 hardware registers.  The chosen address is stored in
 * dev->dev_addr and mirrored to dev->perm_addr.
 *
 * NOTE(review): several lines (mac_offset setup, addr_ok re-checks,
 * return statements) are elided in this excerpt; verify the exact
 * control flow against the full source.
 */
12038 static int __devinit tg3_get_device_address(struct tg3 *tp)
12040 struct net_device *dev = tp->dev;
12041 u32 hi, lo, mac_offset;
/* On SPARC a firmware-provided address, when present, wins outright. */
12044 #ifdef CONFIG_SPARC
12045 if (!tg3_get_macaddr_sparc(tp))
/* 5704/5780-class parts are dual-MAC; DUAL_MAC_CTRL_ID tells us which
 * function we are, so the second port can use a different NVRAM offset. */
12050 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12051 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12052 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
/* Reset the NVRAM interface while holding the NVRAM arbitration lock. */
12054 if (tg3_nvram_lock(tp))
12055 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12057 tg3_nvram_unlock(tp);
12059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12062 /* First try to get it from MAC address mailbox. */
12063 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b (looks like ASCII "HK") is the bootcode's signature that the
 * mailbox contents are valid -- confirm against bootcode docs. */
12064 if ((hi >> 16) == 0x484b) {
12065 dev->dev_addr[0] = (hi >> 8) & 0xff;
12066 dev->dev_addr[1] = (hi >> 0) & 0xff;
12068 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12069 dev->dev_addr[2] = (lo >> 24) & 0xff;
12070 dev->dev_addr[3] = (lo >> 16) & 0xff;
12071 dev->dev_addr[4] = (lo >> 8) & 0xff;
12072 dev->dev_addr[5] = (lo >> 0) & 0xff;
12074 /* Some old bootcode may report a 0 MAC address in SRAM */
12075 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12078 /* Next, try NVRAM. */
/* Note: the NVRAM copy uses the opposite byte order from the mailbox. */
12079 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12080 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12081 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12082 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12083 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12084 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12085 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12086 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12088 /* Finally just fetch it out of the MAC control regs. */
12090 hi = tr32(MAC_ADDR_0_HIGH);
12091 lo = tr32(MAC_ADDR_0_LOW);
12093 dev->dev_addr[5] = lo & 0xff;
12094 dev->dev_addr[4] = (lo >> 8) & 0xff;
12095 dev->dev_addr[3] = (lo >> 16) & 0xff;
12096 dev->dev_addr[2] = (lo >> 24) & 0xff;
12097 dev->dev_addr[1] = hi & 0xff;
12098 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Every source failed validation; on SPARC there is one last fallback
 * via the IDPROM before giving up. */
12102 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12103 #ifdef CONFIG_SPARC
12104 if (!tg3_get_default_macaddr_sparc(tp))
/* Record the chosen address as the device's permanent address. */
12109 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* Goal selector for the DMA burst boundary: stop at every cacheline
 * boundary, or allow bursting across multiple cachelines. */
12113 #define BOUNDARY_SINGLE_CACHELINE 1
12114 #define BOUNDARY_MULTI_CACHELINE 2
/*
 * tg3_calc_dma_bndry() - fold the preferred DMA read/write burst
 * boundary bits into @val (the DMA_RWCTRL register image), based on
 * the PCI cache line size, the bus type (PCI / PCI-X / PCIe) and
 * per-architecture preferences.  Returns the updated register image.
 *
 * NOTE(review): several structural lines (braces, `break`s, the
 * early-out path when boundary bits are meaningless) are elided in
 * this excerpt; verify exact control flow against the full source.
 */
12116 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12118 int cacheline_size;
12122 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* Presumably taken when the (elided) check finds byte == 0, i.e. the
 * cache line size register is unprogrammed -- confirm. */
12124 cacheline_size = 1024;
/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence * 4 bytes. */
12126 cacheline_size = (int) byte * 4;
12128 /* On 5703 and later chips, the boundary bits have no
12131 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12132 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12133 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Architecture-specific preference for bursting across cachelines. */
12136 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12137 goal = BOUNDARY_MULTI_CACHELINE;
12139 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12140 goal = BOUNDARY_SINGLE_CACHELINE;
12149 /* PCI controllers on most RISC systems tend to disconnect
12150 * when a device tries to burst across a cache-line boundary.
12151 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12153 * Unfortunately, for PCI-E there are only limited
12154 * write-side controls for this, and thus for reads
12155 * we will still get the disconnects. We'll also waste
12156 * these PCI cycles for both read and write for chips
12157 * other than 5700 and 5701 which do not implement the
/* PCI-X mode: boundary encodings differ from conventional PCI. */
12160 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12161 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12162 switch (cacheline_size) {
12167 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12168 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12169 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12171 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12172 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12177 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12178 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12182 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12183 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only the write-side boundary is controllable. */
12186 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12187 switch (cacheline_size) {
12191 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12192 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12193 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12199 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12200 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: pick the boundary matching the cacheline size. */
12204 switch (cacheline_size) {
12206 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12207 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12208 DMA_RWCTRL_WRITE_BNDRY_16);
12213 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12214 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12215 DMA_RWCTRL_WRITE_BNDRY_32);
12220 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12221 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12222 DMA_RWCTRL_WRITE_BNDRY_64);
12227 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12228 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12229 DMA_RWCTRL_WRITE_BNDRY_128);
12234 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12235 DMA_RWCTRL_WRITE_BNDRY_256);
12238 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12239 DMA_RWCTRL_WRITE_BNDRY_512);
12243 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12244 DMA_RWCTRL_WRITE_BNDRY_1024);
/*
 * tg3_do_test_dma() - run one host<->NIC DMA transaction of @size bytes
 * at @buf/@buf_dma through the chip's internal DMA engines, driven by a
 * hand-built internal buffer descriptor placed in NIC SRAM.
 * @to_device selects host->chip (read DMA engine) vs. chip->host
 * (write DMA engine).
 *
 * NOTE(review): structural lines (branch braces, delays, the final
 * return) are elided in this excerpt; see the full source for the
 * exact control flow and the success/timeout return values.
 */
12253 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12255 struct tg3_internal_buffer_desc test_desc;
12256 u32 sram_dma_descs;
12259 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the completion FIFOs, both DMA engines, the buffer manager
 * and the flow-through queues before starting the test. */
12261 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12262 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12263 tw32(RDMAC_STATUS, 0);
12264 tw32(WDMAC_STATUS, 0);
12266 tw32(BUFMGR_MODE, 0);
12267 tw32(FTQ_RESET, 0);
/* Describe the host buffer in the chip's internal descriptor format. */
12269 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12270 test_desc.addr_lo = buf_dma & 0xffffffff;
12271 test_desc.nic_mbuf = 0x00002100;
12272 test_desc.len = size;
12275 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12276 * the *second* time the tg3 driver was getting loaded after an
12279 * Broadcom tells me:
12280 * ...the DMA engine is connected to the GRC block and a DMA
12281 * reset may affect the GRC block in some unpredictable way...
12282 * The behavior of resets to individual blocks has not been tested.
12284 * Broadcom noted the GRC reset will also reset all sub-components.
/* Completion/source queue IDs differ for the read vs. write engine. */
12287 test_desc.cqid_sqid = (13 << 8) | 2;
12289 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12292 test_desc.cqid_sqid = (16 << 8) | 7;
12294 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12297 test_desc.flags = 0x00000005;
/* Copy the descriptor into NIC SRAM one u32 at a time via the PCI
 * memory-window config registers, then close the window. */
12299 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12302 val = *(((u32 *)&test_desc) + i);
12303 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12304 sram_dma_descs + (i * sizeof(u32)));
12305 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12307 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the chosen DMA engine by enqueueing the descriptor address. */
12310 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12312 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Bounded poll for the descriptor to appear on a completion FIFO. */
12316 for (i = 0; i < 40; i++) {
12320 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12322 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12323 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the DMA loopback test buffer (8 KiB). */
12334 #define TEST_BUFFER_SIZE 0x2000
/*
 * tg3_test_dma() - compute the chip's DMA read/write control register
 * value (command codes, watermarks, burst boundaries per chip rev and
 * bus type), then verify it with a write-then-read DMA loopback test
 * through NIC SRAM via tg3_do_test_dma().  On 5700/5701 the test runs
 * at maximum write burst to expose a known write-DMA bug; corruption
 * causes the write boundary to be clamped to 16 bytes.
 *
 * NOTE(review): error-exit lines, `goto out`s, retry loop braces and
 * the final return are elided in this excerpt; verify control flow
 * against the full source.
 */
12336 static int __devinit tg3_test_dma(struct tg3 *tp)
12338 dma_addr_t buf_dma;
12339 u32 *buf, saved_dma_rwctrl;
/* Coherent DMA buffer shared between host and NIC for the test. */
12342 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Base register image: PCI write/read command codes. */
12348 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12349 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12351 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* Watermark bits depend on bus type and ASIC revision. */
12353 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12354 /* DMA read watermark not used on PCIE */
12355 tp->dma_rwctrl |= 0x00180000;
12356 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12359 tp->dma_rwctrl |= 0x003f0000;
12361 tp->dma_rwctrl |= 0x003f000f;
12363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12365 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12366 u32 read_water = 0x7;
12368 /* If the 5704 is behind the EPB bridge, we can
12369 * do the less restrictive ONE_DMA workaround for
12370 * better performance.
12372 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12373 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12374 tp->dma_rwctrl |= 0x8000;
12375 else if (ccval == 0x6 || ccval == 0x7)
12376 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12380 /* Set bit 23 to enable PCIX hw bug fix */
12382 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12383 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12385 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12386 /* 5780 always in PCIX mode */
12387 tp->dma_rwctrl |= 0x00144000;
12388 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12389 /* 5714 always in PCIX mode */
12390 tp->dma_rwctrl |= 0x00148000;
12392 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: clear the low nibble (reassigned bits on these revs). */
12396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12397 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12398 tp->dma_rwctrl &= 0xfffffff0;
12400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12402 /* Remove this if it causes problems for some boards. */
12403 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12405 /* On 5700/5701 chips, we need to set this bit.
12406 * Otherwise the chip will issue cacheline transactions
12407 * to streamable DMA memory with not all the byte
12408 * enables turned on. This is an error on several
12409 * RISC PCI controllers, in particular sparc64.
12411 * On 5703/5704 chips, this bit has been reassigned
12412 * a different meaning. In particular, it is used
12413 * on those chips to enable a PCI-X workaround.
12415 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12418 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12421 /* Unneeded, already done by tg3_get_invariants. */
12422 tg3_switch_clocks(tp);
/* The loopback test itself is only run on 5700/5701. */
12426 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12427 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12430 /* It is best to perform DMA test with maximum write burst size
12431 * to expose the 5700/5701 write DMA bug.
12433 saved_dma_rwctrl = tp->dma_rwctrl;
12434 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12435 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a known pattern (fill elided in excerpt). */
12440 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12443 /* Send the buffer to the chip. */
12444 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12446 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12451 /* validate data reached card RAM correctly. */
12452 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12454 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12455 if (le32_to_cpu(val) != p[i]) {
12456 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12457 /* ret = -ENODEV here? */
12462 /* Now read it back. */
12463 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12465 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
/* Compare the round-tripped data against the pattern. */
12471 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* Mismatch: if not already at the 16-byte boundary, clamp the write
 * boundary to 16 and (in the elided code) retry the test. */
12475 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12476 DMA_RWCTRL_WRITE_BNDRY_16) {
12477 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12478 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12479 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12482 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Loop completed without mismatch: the whole buffer verified. */
12488 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12494 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12495 DMA_RWCTRL_WRITE_BNDRY_16) {
/* Host bridges known to need the 16-byte boundary even though the
 * test passes on them. */
12496 static struct pci_device_id dma_wait_state_chipsets[] = {
12497 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12498 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12502 /* DMA test passed without adjusting DMA boundary,
12503 * now look for chipsets that are known to expose the
12504 * DMA bug without failing the test.
12506 if (pci_dev_present(dma_wait_state_chipsets)) {
12507 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12508 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12511 /* Safe to use the calculated DMA boundary. */
12512 tp->dma_rwctrl = saved_dma_rwctrl;
12514 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12518 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12523 static void __devinit tg3_init_link_config(struct tg3 *tp)
12525 tp->link_config.advertising =
12526 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12527 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12528 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12529 ADVERTISED_Autoneg | ADVERTISED_MII);
12530 tp->link_config.speed = SPEED_INVALID;
12531 tp->link_config.duplex = DUPLEX_INVALID;
12532 tp->link_config.autoneg = AUTONEG_ENABLE;
12533 tp->link_config.active_speed = SPEED_INVALID;
12534 tp->link_config.active_duplex = DUPLEX_INVALID;
12535 tp->link_config.phy_is_low_power = 0;
12536 tp->link_config.orig_speed = SPEED_INVALID;
12537 tp->link_config.orig_duplex = DUPLEX_INVALID;
12538 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/*
 * tg3_init_bufmgr_config() - choose the NIC buffer-manager watermark
 * defaults: 5705-and-newer parts use the reduced _5705 values (with a
 * further 5906-specific override), older parts use the original
 * defaults; jumbo-frame watermarks are set alongside each group.
 *
 * NOTE(review): the `} else {` pivot between the 5705+ and legacy
 * branches is elided in this excerpt; verify the branch boundaries
 * against the full source.
 */
12541 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
/* 5705-class and newer chips have smaller on-chip buffer memory. */
12543 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12544 tp->bufmgr_config.mbuf_read_dma_low_water =
12545 DEFAULT_MB_RDMA_LOW_WATER_5705;
12546 tp->bufmgr_config.mbuf_mac_rx_low_water =
12547 DEFAULT_MB_MACRX_LOW_WATER_5705;
12548 tp->bufmgr_config.mbuf_high_water =
12549 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 overrides the MAC-RX low and high watermarks again. */
12550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12551 tp->bufmgr_config.mbuf_mac_rx_low_water =
12552 DEFAULT_MB_MACRX_LOW_WATER_5906;
12553 tp->bufmgr_config.mbuf_high_water =
12554 DEFAULT_MB_HIGH_WATER_5906;
/* Jumbo-frame watermarks for the 5780-class parts. */
12557 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12558 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12559 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12560 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12561 tp->bufmgr_config.mbuf_high_water_jumbo =
12562 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Legacy (pre-5705) defaults, standard and jumbo. */
12564 tp->bufmgr_config.mbuf_read_dma_low_water =
12565 DEFAULT_MB_RDMA_LOW_WATER;
12566 tp->bufmgr_config.mbuf_mac_rx_low_water =
12567 DEFAULT_MB_MACRX_LOW_WATER;
12568 tp->bufmgr_config.mbuf_high_water =
12569 DEFAULT_MB_HIGH_WATER;
12571 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12572 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12573 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12574 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12575 tp->bufmgr_config.mbuf_high_water_jumbo =
12576 DEFAULT_MB_HIGH_WATER_JUMBO;
/* DMA descriptor watermarks are the same on all chips. */
12579 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12580 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12583 static char * __devinit tg3_phy_string(struct tg3 *tp)
12585 switch (tp->phy_id & PHY_ID_MASK) {
12586 case PHY_ID_BCM5400: return "5400";
12587 case PHY_ID_BCM5401: return "5401";
12588 case PHY_ID_BCM5411: return "5411";
12589 case PHY_ID_BCM5701: return "5701";
12590 case PHY_ID_BCM5703: return "5703";
12591 case PHY_ID_BCM5704: return "5704";
12592 case PHY_ID_BCM5705: return "5705";
12593 case PHY_ID_BCM5750: return "5750";
12594 case PHY_ID_BCM5752: return "5752";
12595 case PHY_ID_BCM5714: return "5714";
12596 case PHY_ID_BCM5780: return "5780";
12597 case PHY_ID_BCM5755: return "5755";
12598 case PHY_ID_BCM5787: return "5787";
12599 case PHY_ID_BCM5784: return "5784";
12600 case PHY_ID_BCM5756: return "5722/5756";
12601 case PHY_ID_BCM5906: return "5906";
12602 case PHY_ID_BCM5761: return "5761";
12603 case PHY_ID_BCM8002: return "8002/serdes";
12604 case 0: return "serdes";
12605 default: return "unknown";
12609 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12611 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12612 strcpy(str, "PCI Express");
12614 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12615 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12617 strcpy(str, "PCIX:");
12619 if ((clock_ctrl == 7) ||
12620 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12621 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12622 strcat(str, "133MHz");
12623 else if (clock_ctrl == 0)
12624 strcat(str, "33MHz");
12625 else if (clock_ctrl == 2)
12626 strcat(str, "50MHz");
12627 else if (clock_ctrl == 4)
12628 strcat(str, "66MHz");
12629 else if (clock_ctrl == 6)
12630 strcat(str, "100MHz");
12632 strcpy(str, "PCI:");
12633 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12634 strcat(str, "66MHz");
12636 strcat(str, "33MHz");
12638 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12639 strcat(str, ":32-bit");
12641 strcat(str, ":64-bit");
/* Locate the "peer" PCI function of a dual-port device (e.g. 5704):
 * scan all eight functions at this device's slot and pick the one that
 * is not tp->pdev itself.
 *
 * NOTE(review): this extract elides several lines of this function --
 * apparently the loop-break, the pci_dev_put() refcount handling the
 * surviving comments refer to, and the single-port fallback that returns
 * tp->pdev.  Confirm against the full source before modifying.
 */
12645 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12647 struct pci_dev *peer;
12648 unsigned int func, devnr = tp->pdev->devfn & ~7;
12650 for (func = 0; func < 8; func++) {
12651 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12652 if (peer && peer != tp->pdev)
12656 /* 5704 can be configured in single-port mode, set peer to
12657 * tp->pdev in that case.
12665 * We don't need to keep the refcount elevated; there's no way
12666 * to remove one half of this device without removing the other
12673 static void __devinit tg3_init_coal(struct tg3 *tp)
12675 struct ethtool_coalesce *ec = &tp->coal;
12677 memset(ec, 0, sizeof(*ec));
12678 ec->cmd = ETHTOOL_GCOALESCE;
12679 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12680 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12681 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12682 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12683 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12684 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12685 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12686 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12687 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12689 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12690 HOSTCC_MODE_CLRTICK_TXBD)) {
12691 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12692 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12693 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12694 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12697 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12698 ec->rx_coalesce_usecs_irq = 0;
12699 ec->tx_coalesce_usecs_irq = 0;
12700 ec->stats_block_coalesce_usecs = 0;
/* PCI probe routine: called once per matching device.  Enables and maps
 * the device, allocates the net_device, configures DMA masks and feature
 * flags, performs the DMA self-test, and registers the interface.
 *
 * NOTE(review): this extract elides many lines of this function --
 * closing braces, several error-message continuation lines, the local
 * declarations of tp/str/err/pm_cap, and most of the err_out_* label
 * lines at the bottom.  Comments below only describe what the visible
 * lines establish; confirm elided control flow against the full source.
 */
12704 static int __devinit tg3_init_one(struct pci_dev *pdev,
12705 const struct pci_device_id *ent)
12707 static int tg3_version_printed = 0;
12708 resource_size_t tg3reg_base;
12709 unsigned long tg3reg_len;
12710 struct net_device *dev;
12714 u64 dma_mask, persist_dma_mask;
12715 DECLARE_MAC_BUF(mac);
/* Print the driver version banner exactly once, on the first probe. */
12717 if (tg3_version_printed++ == 0)
12718 printk(KERN_INFO "%s", version);
/* Enable the PCI device and claim its BAR 0 memory region. */
12720 err = pci_enable_device(pdev);
12722 printk(KERN_ERR PFX "Cannot enable PCI device, "
12727 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12728 printk(KERN_ERR PFX "Cannot find proper PCI device "
12729 "base address, aborting.\n");
12731 goto err_out_disable_pdev;
12734 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12736 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12738 goto err_out_disable_pdev;
12741 pci_set_master(pdev);
12743 /* Find power-management capability. */
12744 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12746 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12749 goto err_out_free_res;
12752 tg3reg_base = pci_resource_start(pdev, 0);
12753 tg3reg_len = pci_resource_len(pdev, 0);
/* Allocate the net_device with struct tg3 as its private area. */
12755 dev = alloc_etherdev(sizeof(*tp));
12757 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12759 goto err_out_free_res;
12762 SET_NETDEV_DEV(dev, &pdev->dev);
12764 #if TG3_VLAN_TAG_USED
12765 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12766 dev->vlan_rx_register = tg3_vlan_rx_register;
/* Initialize the private tg3 state from the probe parameters. */
12769 tp = netdev_priv(dev);
12772 tp->pm_cap = pm_cap;
12773 tp->mac_mode = TG3_DEF_MAC_MODE;
12774 tp->rx_mode = TG3_DEF_RX_MODE;
12775 tp->tx_mode = TG3_DEF_TX_MODE;
/* Use the module-parameter debug level if set, else the default. */
12778 tp->msg_enable = tg3_debug;
12780 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12782 /* The word/byte swap controls here control register access byte
12783 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12786 tp->misc_host_ctrl =
12787 MISC_HOST_CTRL_MASK_PCI_INT |
12788 MISC_HOST_CTRL_WORD_SWAP |
12789 MISC_HOST_CTRL_INDIR_ACCESS |
12790 MISC_HOST_CTRL_PCISTATE_RW;
12792 /* The NONFRM (non-frame) byte/word swap controls take effect
12793 * on descriptor entries, anything which isn't packet data.
12795 * The StrongARM chips on the board (one for tx, one for rx)
12796 * are running in big-endian mode.
12798 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12799 GRC_MODE_WSWAP_NONFRM_DATA);
12800 #ifdef __BIG_ENDIAN
12801 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12803 spin_lock_init(&tp->lock);
12804 spin_lock_init(&tp->indirect_lock);
12805 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* Map BAR 0 so register accessors (tr32/tw32) work. */
12807 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12809 printk(KERN_ERR PFX "Cannot map device registers, "
12812 goto err_out_free_dev;
12815 tg3_init_link_config(tp);
12817 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12818 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12819 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Wire up the (pre-net_device_ops era) net_device callbacks. */
12821 dev->open = tg3_open;
12822 dev->stop = tg3_close;
12823 dev->get_stats = tg3_get_stats;
12824 dev->set_multicast_list = tg3_set_rx_mode;
12825 dev->set_mac_address = tg3_set_mac_addr;
12826 dev->do_ioctl = tg3_ioctl;
12827 dev->tx_timeout = tg3_tx_timeout;
12828 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12829 dev->ethtool_ops = &tg3_ethtool_ops;
12830 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12831 dev->change_mtu = tg3_change_mtu;
12832 dev->irq = pdev->irq;
12833 #ifdef CONFIG_NET_POLL_CONTROLLER
12834 dev->poll_controller = tg3_poll_controller;
/* Read chip revision, flags and quirks from hardware/EEPROM. */
12837 err = tg3_get_invariants(tp);
12839 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12841 goto err_out_iounmap;
12844 /* The EPB bridge inside 5714, 5715, and 5780 and any
12845 * device behind the EPB cannot support DMA addresses > 40-bit.
12846 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12847 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12848 * do DMA address check in tg3_start_xmit().
12850 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12851 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12852 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12853 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12854 #ifdef CONFIG_HIGHMEM
12855 dma_mask = DMA_64BIT_MASK;
12858 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12860 /* Configure DMA attributes. */
12861 if (dma_mask > DMA_32BIT_MASK) {
12862 err = pci_set_dma_mask(pdev, dma_mask);
12864 dev->features |= NETIF_F_HIGHDMA;
12865 err = pci_set_consistent_dma_mask(pdev,
12868 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12869 "DMA for consistent allocations\n");
12870 goto err_out_iounmap;
/* Fall back to a 32-bit mask if the wide mask was refused. */
12874 if (err || dma_mask == DMA_32BIT_MASK) {
12875 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12877 printk(KERN_ERR PFX "No usable DMA configuration, "
12879 goto err_out_iounmap;
12883 tg3_init_bufmgr_config(tp);
/* Decide TSO capability: HW TSO chips are always capable; several
 * older chips (or ASF-enabled ones) get firmware TSO disabled.
 */
12885 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12886 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12888 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12890 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12892 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12893 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12895 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12898 /* TSO is on by default on chips that support hardware TSO.
12899 * Firmware TSO on older chips gives lower performance, so it
12900 * is off by default, but can be enabled using ethtool.
12902 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12903 dev->features |= NETIF_F_TSO;
12904 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12905 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12906 dev->features |= NETIF_F_TSO6;
12907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12908 dev->features |= NETIF_F_TSO_ECN;
/* 5705_A1 without TSO on a slow bus is limited to 64 RX pending. */
12912 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12913 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12914 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12915 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12916 tp->rx_pending = 63;
12919 err = tg3_get_device_address(tp);
12921 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12923 goto err_out_iounmap;
/* Chips with the APE management processor also need BAR 2 mapped. */
12926 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12927 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12928 printk(KERN_ERR PFX "Cannot find proper PCI device "
12929 "base address for APE, aborting.\n");
12931 goto err_out_iounmap;
12934 tg3reg_base = pci_resource_start(pdev, 2);
12935 tg3reg_len = pci_resource_len(pdev, 2);
12937 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12938 if (!tp->aperegs) {
12939 printk(KERN_ERR PFX "Cannot map APE registers, "
12942 goto err_out_iounmap;
12945 tg3_ape_lock_init(tp);
12949 * Reset chip in case UNDI or EFI driver did not shutdown
12950 * DMA self test will enable WDMAC and we'll see (spurious)
12951 * pending DMA on the PCI bus at that point.
12953 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12954 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12955 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12956 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12959 err = tg3_test_dma(tp);
12961 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12962 goto err_out_apeunmap;
12965 /* Tigon3 can do ipv4 only... and some chips have buggy
/* Enable checksum offload features unless the chip's checksums
 * are known-broken; newer ASICs also handle IPv6 checksums.
 */
12968 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12969 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12970 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12974 dev->features |= NETIF_F_IPV6_CSUM;
12976 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12978 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12980 /* flow control autonegotiation is default behavior */
12981 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12982 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
12986 pci_set_drvdata(pdev, dev);
12988 err = register_netdev(dev);
12990 printk(KERN_ERR PFX "Cannot register net device, "
12992 goto err_out_apeunmap;
/* Probe succeeded: log the hardware summary and feature flags. */
12995 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12996 "(%s) %s Ethernet %s\n",
12998 tp->board_part_number,
12999 tp->pci_chip_rev_id,
13000 tg3_phy_string(tp),
13001 tg3_bus_string(tp, str),
13002 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13003 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13004 "10/100/1000Base-T")),
13005 print_mac(mac, dev->dev_addr));
13007 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
13008 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
13010 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13011 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13012 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13013 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13014 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13015 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13016 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13017 dev->name, tp->dma_rwctrl,
13018 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13019 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
/* Error unwind: labels between err_out_apeunmap and
 * err_out_disable_pdev (iounmap of tp->regs, free_netdev, the
 * "return 0" success path) are elided from this extract.
 */
13025 iounmap(tp->aperegs);
13026 tp->aperegs = NULL;
13039 pci_release_regions(pdev);
13041 err_out_disable_pdev:
13042 pci_disable_device(pdev);
13043 pci_set_drvdata(pdev, NULL);
/* PCI remove routine: tears down everything tg3_init_one() set up --
 * pending work, netdev registration, register mappings and PCI state.
 *
 * NOTE(review): lines elided in this extract appear to include the
 * NULL-check on dev, the guard around tp->aperegs, the iounmap of
 * tp->regs and the free_netdev(dev) call -- confirm in the full source.
 */
13047 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13049 struct net_device *dev = pci_get_drvdata(pdev);
13052 struct tg3 *tp = netdev_priv(dev);
/* Flush the reset_task before the netdev goes away. */
13054 flush_scheduled_work();
13055 unregister_netdev(dev);
13057 iounmap(tp->aperegs);
13058 tp->aperegs = NULL;
13065 pci_release_regions(pdev);
13066 pci_disable_device(pdev);
13067 pci_set_drvdata(pdev, NULL);
/* PCI suspend handler: save PCI state, quiesce the interface if it is
 * running (stop NAPI/queues, kill the timer, disable interrupts, halt
 * the chip), then drop the device into the requested power state.
 *
 * NOTE(review): the error-recovery path is partly elided here -- the
 * visible tail (restart_hw, re-add timer, netif re-attach/start) runs
 * only when tg3_set_power_state() fails; confirm the elided branch
 * structure against the full source.
 */
13071 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13073 struct net_device *dev = pci_get_drvdata(pdev);
13074 struct tg3 *tp = netdev_priv(dev);
13077 /* PCI register 4 needs to be saved whether netif_running() or not.
13078 * MSI address and data need to be saved if using MSI and
13081 pci_save_state(pdev);
/* Nothing else to do if the interface is down. */
13083 if (!netif_running(dev))
13086 flush_scheduled_work();
13087 tg3_netif_stop(tp);
13089 del_timer_sync(&tp->timer);
13091 tg3_full_lock(tp, 1);
13092 tg3_disable_ints(tp);
13093 tg3_full_unlock(tp);
13095 netif_device_detach(dev);
13097 tg3_full_lock(tp, 0);
/* Halt the chip and mark init as torn down before powering off. */
13098 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13099 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13100 tg3_full_unlock(tp);
13102 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
/* Recovery path: bring the device back up if power-down failed. */
13104 tg3_full_lock(tp, 0);
13106 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13107 if (tg3_restart_hw(tp, 1))
13110 tp->timer.expires = jiffies + tp->timer_offset;
13111 add_timer(&tp->timer);
13113 netif_device_attach(dev);
13114 tg3_netif_start(tp);
13117 tg3_full_unlock(tp);
/* PCI resume handler: restore PCI config space, return the chip to D0,
 * and -- if the interface was running -- restart the hardware, timer
 * and transmit path.
 *
 * NOTE(review): early-return and error-exit lines (after the
 * netif_running() check, tg3_set_power_state() failure, and the
 * restart_hw failure path) are elided from this extract.
 */
13123 static int tg3_resume(struct pci_dev *pdev)
13125 struct net_device *dev = pci_get_drvdata(pdev);
13126 struct tg3 *tp = netdev_priv(dev);
13129 pci_restore_state(tp->pdev);
13131 if (!netif_running(dev))
13134 err = tg3_set_power_state(tp, PCI_D0);
13138 netif_device_attach(dev);
13140 tg3_full_lock(tp, 0);
/* Re-initialize the hardware and resume periodic servicing. */
13142 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13143 err = tg3_restart_hw(tp, 1);
13147 tp->timer.expires = jiffies + tp->timer_offset;
13148 add_timer(&tp->timer);
13150 tg3_netif_start(tp);
13153 tg3_full_unlock(tp);
/* PCI driver descriptor tying the device-ID table to the probe/remove
 * and power-management entry points above.
 */
13158 static struct pci_driver tg3_driver = {
13159 .name = DRV_MODULE_NAME,
13160 .id_table = tg3_pci_tbl,
13161 .probe = tg3_init_one,
13162 .remove = __devexit_p(tg3_remove_one),
13163 .suspend = tg3_suspend,
13164 .resume = tg3_resume
13167 static int __init tg3_init(void)
13169 return pci_register_driver(&tg3_driver);
13172 static void __exit tg3_cleanup(void)
13174 pci_unregister_driver(&tg3_driver);
/* Register the module load/unload hooks. */
13177 module_init(tg3_init);
13178 module_exit(tg3_cleanup);