Git - linux-2.6/blob - drivers/net/bnx2.c
[BNX2]: Restructure RX ring init. code.
[linux-2.6] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
#define FW_BUF_SIZE             0x8000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* prefix for printk messages */
#define DRV_MODULE_VERSION      "1.6.9"
#define DRV_MODULE_RELDATE      "December 8, 2007"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board identifiers; each value indexes the board_info[] name table
 * below and is stored in the driver_data field of bnx2_pci_tbl.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
92 /* indexed by board_t, above */
/* Human-readable board names, indexed by board_t above — the entry
 * order must match the enum exactly.
 */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI device match table.  HP OEM variants are matched first by their
 * subsystem vendor/device IDs; the PCI_ANY_ID wildcard entries for the
 * same device IDs must come after them, so entry order matters.
 * The last field (driver_data) is the board_t index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* NVRAM device table.  Each struct flash_spec entry describes one
 * supported flash/EEPROM part (raw register values followed by flags,
 * page geometry, address mask, total size and a display name — see
 * struct flash_spec in bnx2.h for the field layout).  The hex values
 * are hardware configuration words; do not edit them.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM configuration, so it uses this
 * dedicated spec instead of probing flash_table[].
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bp->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Indirectly write a device register through the PCICFG register
 * window, serialized with indirect_lock (the address/data window is
 * shared with bnx2_reg_rd_ind()).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        spin_lock_bh(&bp->indirect_lock);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
        spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register 'reg' over the MDIO interface.
 *
 * If hardware auto-polling of the PHY is enabled, it is turned off
 * for the duration of the manual access and restored afterwards.
 * Returns 0 with the register data in *val, or -EBUSY (and *val = 0)
 * if the MDIO transaction does not complete within the poll window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Disable auto-polling; the read-back posts the write
                 * before the settling delay.
                 */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose and launch the MDIO read command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for the START_BUSY bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to pick up the data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Timed out: BUSY never cleared. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write 'val' to PHY register 'reg' over the MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual access.  Returns 0 on success or -EBUSY if the transaction
 * does not complete within the poll window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Disable auto-polling for the duration of the write. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        /* Compose and launch the MDIO write command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to 50 x 10us for the START_BUSY bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
/* Mask the device interrupt.  The read-back flushes the posted PCI
 * write so the mask takes effect before this function returns.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask the device interrupt.  The first write acks events up to
 * last_status_idx while keeping the interrupt masked; the second
 * write clears the mask.  Finally COAL_NOW forces an immediate
 * host-coalescing pass so no pending event is left unserviced.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Mask the device interrupt and wait for any in-flight handler to
 * finish.  intr_sem is incremented first so bnx2_netif_start() will
 * not re-enable interrupts until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: mask interrupts (synchronously), stop NAPI
 * polling and disable the TX queue.  trans_start is refreshed so the
 * netdev watchdog does not report a spurious TX timeout while the
 * queue is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                napi_disable(&bp->napi);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
438
439 static void
440 bnx2_netif_start(struct bnx2 *bp)
441 {
442         if (atomic_dec_and_test(&bp->intr_sem)) {
443                 if (netif_running(bp->dev)) {
444                         netif_wake_queue(bp->dev);
445                         napi_enable(&bp->napi);
446                         bnx2_enable_int(bp);
447                 }
448         }
449 }
450
/* Release all rings, context pages and status/statistics memory
 * allocated by bnx2_alloc_mem().  Safe on a partially allocated
 * device: pointers are checked before freeing (or the free routine
 * tolerates NULL) and cleared afterwards.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        /* 5709 host context pages (ctx_pages is 0 on other chips). */
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
                        pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                            bp->ctx_blk[i],
                                            bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        /* Status and statistics blocks share one allocation. */
        if (bp->status_blk) {
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);         /* kfree(NULL) is a no-op */
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);         /* vfree(NULL) is a no-op */
        bp->rx_buf_ring = NULL;
}
487
/* Allocate all TX/RX ring, status/statistics and (5709) context
 * memory for the device.  Returns 0 on success or -ENOMEM; on any
 * failure every partial allocation is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Software TX ring state (zeroed kernel memory). */
        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        /* Hardware TX descriptor ring (DMA-coherent). */
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Software RX ring state, zeroed below. */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        /* One DMA-coherent descriptor page per RX ring. */
        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* The statistics block follows the cache-aligned status block. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* The 5709 needs 0x2000 bytes of host-resident context
                 * memory, allocated one BCM page at a time.
                 */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
552
553 static void
554 bnx2_report_fw_link(struct bnx2 *bp)
555 {
556         u32 fw_link_status = 0;
557
558         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
559                 return;
560
561         if (bp->link_up) {
562                 u32 bmsr;
563
564                 switch (bp->line_speed) {
565                 case SPEED_10:
566                         if (bp->duplex == DUPLEX_HALF)
567                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
568                         else
569                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
570                         break;
571                 case SPEED_100:
572                         if (bp->duplex == DUPLEX_HALF)
573                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
574                         else
575                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
576                         break;
577                 case SPEED_1000:
578                         if (bp->duplex == DUPLEX_HALF)
579                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
580                         else
581                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
582                         break;
583                 case SPEED_2500:
584                         if (bp->duplex == DUPLEX_HALF)
585                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
586                         else
587                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
588                         break;
589                 }
590
591                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
592
593                 if (bp->autoneg) {
594                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
595
596                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
597                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
598
599                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
600                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
601                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
602                         else
603                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
604                 }
605         }
606         else
607                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
608
609         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
610 }
611
612 static char *
613 bnx2_xceiver_str(struct bnx2 *bp)
614 {
615         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
616                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
617                  "Copper"));
618 }
619
/* Log the link state (speed, duplex, flow control) and update the
 * carrier state, then propagate the state to the bootcode via
 * bnx2_report_fw_link().  The message is assembled from several
 * printk fragments, so their order matters.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
656
/* Resolve the negotiated flow-control (pause) configuration into
 * bp->flow_ctrl.
 *
 * If speed or flow control is forced (not autonegotiated), the
 * requested setting is used directly (full duplex only).  Otherwise
 * the local and partner pause advertisements are compared per the
 * standard pause resolution rules.  Flow control is only meaningful
 * at full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Forced configuration: honor the requested setting. */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex links. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                /* The 5708 SerDes PHY reports the resolved pause
                 * result directly in 1000X_STAT1.
                 */
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Translate the 1000Base-X pause bits into the copper
                 * ADVERTISE_PAUSE_* encoding so one resolution path
                 * handles both media types.
                 */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
732
733 static int
734 bnx2_5709s_linkup(struct bnx2 *bp)
735 {
736         u32 val, speed;
737
738         bp->link_up = 1;
739
740         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
741         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
742         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
743
744         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
745                 bp->line_speed = bp->req_line_speed;
746                 bp->duplex = bp->req_duplex;
747                 return 0;
748         }
749         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
750         switch (speed) {
751                 case MII_BNX2_GP_TOP_AN_SPEED_10:
752                         bp->line_speed = SPEED_10;
753                         break;
754                 case MII_BNX2_GP_TOP_AN_SPEED_100:
755                         bp->line_speed = SPEED_100;
756                         break;
757                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
758                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
759                         bp->line_speed = SPEED_1000;
760                         break;
761                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
762                         bp->line_speed = SPEED_2500;
763                         break;
764         }
765         if (val & MII_BNX2_GP_TOP_AN_FD)
766                 bp->duplex = DUPLEX_FULL;
767         else
768                 bp->duplex = DUPLEX_HALF;
769         return 0;
770 }
771
772 static int
773 bnx2_5708s_linkup(struct bnx2 *bp)
774 {
775         u32 val;
776
777         bp->link_up = 1;
778         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
779         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
780                 case BCM5708S_1000X_STAT1_SPEED_10:
781                         bp->line_speed = SPEED_10;
782                         break;
783                 case BCM5708S_1000X_STAT1_SPEED_100:
784                         bp->line_speed = SPEED_100;
785                         break;
786                 case BCM5708S_1000X_STAT1_SPEED_1G:
787                         bp->line_speed = SPEED_1000;
788                         break;
789                 case BCM5708S_1000X_STAT1_SPEED_2G5:
790                         bp->line_speed = SPEED_2500;
791                         break;
792         }
793         if (val & BCM5708S_1000X_STAT1_FD)
794                 bp->duplex = DUPLEX_FULL;
795         else
796                 bp->duplex = DUPLEX_HALF;
797
798         return 0;
799 }
800
801 static int
802 bnx2_5706s_linkup(struct bnx2 *bp)
803 {
804         u32 bmcr, local_adv, remote_adv, common;
805
806         bp->link_up = 1;
807         bp->line_speed = SPEED_1000;
808
809         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
810         if (bmcr & BMCR_FULLDPLX) {
811                 bp->duplex = DUPLEX_FULL;
812         }
813         else {
814                 bp->duplex = DUPLEX_HALF;
815         }
816
817         if (!(bmcr & BMCR_ANENABLE)) {
818                 return 0;
819         }
820
821         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
822         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
823
824         common = local_adv & remote_adv;
825         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
826
827                 if (common & ADVERTISE_1000XFULL) {
828                         bp->duplex = DUPLEX_FULL;
829                 }
830                 else {
831                         bp->duplex = DUPLEX_HALF;
832                 }
833         }
834
835         return 0;
836 }
837
838 static int
839 bnx2_copper_linkup(struct bnx2 *bp)
840 {
841         u32 bmcr;
842
843         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
844         if (bmcr & BMCR_ANENABLE) {
845                 u32 local_adv, remote_adv, common;
846
847                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
848                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
849
850                 common = local_adv & (remote_adv >> 2);
851                 if (common & ADVERTISE_1000FULL) {
852                         bp->line_speed = SPEED_1000;
853                         bp->duplex = DUPLEX_FULL;
854                 }
855                 else if (common & ADVERTISE_1000HALF) {
856                         bp->line_speed = SPEED_1000;
857                         bp->duplex = DUPLEX_HALF;
858                 }
859                 else {
860                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
861                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
862
863                         common = local_adv & remote_adv;
864                         if (common & ADVERTISE_100FULL) {
865                                 bp->line_speed = SPEED_100;
866                                 bp->duplex = DUPLEX_FULL;
867                         }
868                         else if (common & ADVERTISE_100HALF) {
869                                 bp->line_speed = SPEED_100;
870                                 bp->duplex = DUPLEX_HALF;
871                         }
872                         else if (common & ADVERTISE_10FULL) {
873                                 bp->line_speed = SPEED_10;
874                                 bp->duplex = DUPLEX_FULL;
875                         }
876                         else if (common & ADVERTISE_10HALF) {
877                                 bp->line_speed = SPEED_10;
878                                 bp->duplex = DUPLEX_HALF;
879                         }
880                         else {
881                                 bp->line_speed = 0;
882                                 bp->link_up = 0;
883                         }
884                 }
885         }
886         else {
887                 if (bmcr & BMCR_SPEED100) {
888                         bp->line_speed = SPEED_100;
889                 }
890                 else {
891                         bp->line_speed = SPEED_10;
892                 }
893                 if (bmcr & BMCR_FULLDPLX) {
894                         bp->duplex = DUPLEX_FULL;
895                 }
896                 else {
897                         bp->duplex = DUPLEX_HALF;
898                 }
899         }
900
901         return 0;
902 }
903
/* Program the EMAC to match the link state currently recorded in bp
 * (speed, duplex, RX/TX pause), then acknowledge the EMAC link-change
 * interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* NOTE(review): 0x2620/0x26ff look like TX length/IPG tuning
         * values; 0x26ff is applied only for 1000 Mbps half duplex —
         * confirm against the EMAC register spec. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        /* Clear the port/duplex/loopback/25G bits before re-deriving
         * them from the resolved link state below. */
        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* Only chips newer than the 5706 have a
                                 * dedicated 10M MII port mode. */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* Link down: leave the MAC parked in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
970
971 static void
972 bnx2_enable_bmsr1(struct bnx2 *bp)
973 {
974         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
975             (CHIP_NUM(bp) == CHIP_NUM_5709))
976                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
977                                MII_BNX2_BLK_ADDR_GP_STATUS);
978 }
979
980 static void
981 bnx2_disable_bmsr1(struct bnx2 *bp)
982 {
983         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
984             (CHIP_NUM(bp) == CHIP_NUM_5709))
985                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
986                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
987 }
988
989 static int
990 bnx2_test_and_enable_2g5(struct bnx2 *bp)
991 {
992         u32 up1;
993         int ret = 1;
994
995         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
996                 return 0;
997
998         if (bp->autoneg & AUTONEG_SPEED)
999                 bp->advertising |= ADVERTISED_2500baseX_Full;
1000
1001         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1002                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1003
1004         bnx2_read_phy(bp, bp->mii_up1, &up1);
1005         if (!(up1 & BCM5708S_UP1_2G5)) {
1006                 up1 |= BCM5708S_UP1_2G5;
1007                 bnx2_write_phy(bp, bp->mii_up1, up1);
1008                 ret = 0;
1009         }
1010
1011         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1012                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1013                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1014
1015         return ret;
1016 }
1017
1018 static int
1019 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1020 {
1021         u32 up1;
1022         int ret = 0;
1023
1024         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025                 return 0;
1026
1027         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1028                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1029
1030         bnx2_read_phy(bp, bp->mii_up1, &up1);
1031         if (up1 & BCM5708S_UP1_2G5) {
1032                 up1 &= ~BCM5708S_UP1_2G5;
1033                 bnx2_write_phy(bp, bp->mii_up1, up1);
1034                 ret = 1;
1035         }
1036
1037         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1038                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1039                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1040
1041         return ret;
1042 }
1043
1044 static void
1045 bnx2_enable_forced_2g5(struct bnx2 *bp)
1046 {
1047         u32 bmcr;
1048
1049         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1050                 return;
1051
1052         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1053                 u32 val;
1054
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1057                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1058                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1059                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1060                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1061
1062                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1063                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1064                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1065
1066         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1067                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1068                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1069         }
1070
1071         if (bp->autoneg & AUTONEG_SPEED) {
1072                 bmcr &= ~BMCR_ANENABLE;
1073                 if (bp->req_duplex == DUPLEX_FULL)
1074                         bmcr |= BMCR_FULLDPLX;
1075         }
1076         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1077 }
1078
1079 static void
1080 bnx2_disable_forced_2g5(struct bnx2 *bp)
1081 {
1082         u32 bmcr;
1083
1084         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1085                 return;
1086
1087         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1088                 u32 val;
1089
1090                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1092                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1093                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1094                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1095
1096                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1097                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1099
1100         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1101                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1102                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1103         }
1104
1105         if (bp->autoneg & AUTONEG_SPEED)
1106                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1107         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1108 }
1109
/* Poll the local PHY, update bp's link/speed/duplex/flow-control state,
 * report transitions, and reprogram the MAC.  Always returns 0.
 * NOTE(review): appears to run under bp->phy_lock — confirm at callers.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In MAC or PHY loopback the link is reported up unconditionally. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Firmware-managed (remote) PHYs are handled via shared-memory
         * events elsewhere, not by polling registers here. */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return 0;

        link_up = bp->link_up;

        /* BMSR latches link-down events; read it twice so the second
         * read reflects the current link state. */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        /* On 5706 SerDes, override the PHY's link bit with the EMAC's
         * own link status. */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve speed/duplex with the chip-specific helper. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link down: drop any forced 2.5G setting so autoneg
                 * can renegotiate from scratch. */
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        /* Log the transition only when the link state actually changed. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1175
1176 static int
1177 bnx2_reset_phy(struct bnx2 *bp)
1178 {
1179         int i;
1180         u32 reg;
1181
1182         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1183
1184 #define PHY_RESET_MAX_WAIT 100
1185         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1186                 udelay(10);
1187
1188                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1189                 if (!(reg & BMCR_RESET)) {
1190                         udelay(20);
1191                         break;
1192                 }
1193         }
1194         if (i == PHY_RESET_MAX_WAIT) {
1195                 return -EBUSY;
1196         }
1197         return 0;
1198 }
1199
1200 static u32
1201 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1202 {
1203         u32 adv = 0;
1204
1205         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1206                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1207
1208                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1209                         adv = ADVERTISE_1000XPAUSE;
1210                 }
1211                 else {
1212                         adv = ADVERTISE_PAUSE_CAP;
1213                 }
1214         }
1215         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1216                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217                         adv = ADVERTISE_1000XPSE_ASYM;
1218                 }
1219                 else {
1220                         adv = ADVERTISE_PAUSE_ASYM;
1221                 }
1222         }
1223         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1224                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1226                 }
1227                 else {
1228                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229                 }
1230         }
1231         return adv;
1232 }
1233
1234 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1235
1236 static int
1237 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1238 {
1239         u32 speed_arg = 0, pause_adv;
1240
1241         pause_adv = bnx2_phy_get_pause_adv(bp);
1242
1243         if (bp->autoneg & AUTONEG_SPEED) {
1244                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1245                 if (bp->advertising & ADVERTISED_10baseT_Half)
1246                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1247                 if (bp->advertising & ADVERTISED_10baseT_Full)
1248                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1249                 if (bp->advertising & ADVERTISED_100baseT_Half)
1250                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1251                 if (bp->advertising & ADVERTISED_100baseT_Full)
1252                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1253                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1254                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1255                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1256                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1257         } else {
1258                 if (bp->req_line_speed == SPEED_2500)
1259                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1260                 else if (bp->req_line_speed == SPEED_1000)
1261                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1262                 else if (bp->req_line_speed == SPEED_100) {
1263                         if (bp->req_duplex == DUPLEX_FULL)
1264                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1265                         else
1266                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1267                 } else if (bp->req_line_speed == SPEED_10) {
1268                         if (bp->req_duplex == DUPLEX_FULL)
1269                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1270                         else
1271                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1272                 }
1273         }
1274
1275         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1276                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1277         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1278                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1279
1280         if (port == PORT_TP)
1281                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1282                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1283
1284         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1285
1286         spin_unlock_bh(&bp->phy_lock);
1287         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1288         spin_lock_bh(&bp->phy_lock);
1289
1290         return 0;
1291 }
1292
/* Apply the requested link configuration to a SerDes PHY.
 *
 * Remote (firmware-managed) PHYs are delegated to
 * bnx2_setup_remote_phy().  Otherwise: with autoneg disabled the
 * speed/duplex is forced via BMCR (bouncing the link if the settings
 * changed so the partner notices); with autoneg enabled the
 * advertisement is rewritten and autoneg restarted if anything
 * changed.  Always returns 0.
 * NOTE(review): appears to run under bp->phy_lock (it is dropped
 * around the msleep below) — confirm at callers.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return (bnx2_setup_remote_phy(bp, port));

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                u32 new_bmcr;
                int force_link_down = 0;

                /* Toggling the 2.5G capability bit requires a link
                 * bounce for the change to take effect. */
                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                /* NOTE(review): 0x2000 is the
                                 * BMCR_SPEED100 bit position; cleared
                                 * here for forced 1G on the 5709 —
                                 * confirm against the 5709 SerDes
                                 * register spec. */
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        /* Nothing changed; just re-resolve pause and
                         * reprogram the MAC. */
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1407
/* Advertisement-mask helpers used when (re)building autoneg settings. */

/* All fibre speeds this device can advertise; 2.5G is included only on
 * PHYs flagged 2.5G-capable.  Expands an expression that reads `bp`
 * from the enclosing scope. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* All copper speeds: 10/100 half+full plus 1000 full. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement bits for every 10/100 mode (plus the CSMA selector). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register advertisement bits. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1422
1423 static void
1424 bnx2_set_default_remote_link(struct bnx2 *bp)
1425 {
1426         u32 link;
1427
1428         if (bp->phy_port == PORT_TP)
1429                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1430         else
1431                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1432
1433         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1434                 bp->req_line_speed = 0;
1435                 bp->autoneg |= AUTONEG_SPEED;
1436                 bp->advertising = ADVERTISED_Autoneg;
1437                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1438                         bp->advertising |= ADVERTISED_10baseT_Half;
1439                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1440                         bp->advertising |= ADVERTISED_10baseT_Full;
1441                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1442                         bp->advertising |= ADVERTISED_100baseT_Half;
1443                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1444                         bp->advertising |= ADVERTISED_100baseT_Full;
1445                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1446                         bp->advertising |= ADVERTISED_1000baseT_Full;
1447                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1448                         bp->advertising |= ADVERTISED_2500baseX_Full;
1449         } else {
1450                 bp->autoneg = 0;
1451                 bp->advertising = 0;
1452                 bp->req_duplex = DUPLEX_FULL;
1453                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1454                         bp->req_line_speed = SPEED_10;
1455                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1456                                 bp->req_duplex = DUPLEX_HALF;
1457                 }
1458                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1459                         bp->req_line_speed = SPEED_100;
1460                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1461                                 bp->req_duplex = DUPLEX_HALF;
1462                 }
1463                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1464                         bp->req_line_speed = SPEED_1000;
1465                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1466                         bp->req_line_speed = SPEED_2500;
1467         }
1468 }
1469
1470 static void
1471 bnx2_set_default_link(struct bnx2 *bp)
1472 {
1473         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1474                 return bnx2_set_default_remote_link(bp);
1475
1476         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1477         bp->req_line_speed = 0;
1478         if (bp->phy_flags & PHY_SERDES_FLAG) {
1479                 u32 reg;
1480
1481                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1482
1483                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1484                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1485                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1486                         bp->autoneg = 0;
1487                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1488                         bp->req_duplex = DUPLEX_FULL;
1489                 }
1490         } else
1491                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1492 }
1493
1494 static void
1495 bnx2_send_heart_beat(struct bnx2 *bp)
1496 {
1497         u32 msg;
1498         u32 addr;
1499
1500         spin_lock(&bp->indirect_lock);
1501         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1502         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1503         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1504         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1505         spin_unlock(&bp->indirect_lock);
1506 }
1507
/* Handle a link event reported by the firmware-managed (remote) PHY:
 * decode the link-status word from shared memory into bp->link_up,
 * speed, duplex, flow control and port type, then reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;
        u8 old_port;

        msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

        /* Answer an expired heartbeat before decoding the rest. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxHALF case sets the duplex and then falls
                 * through to the shared speed assignment below it. */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                spin_lock(&bp->phy_lock);
                bp->flow_ctrl = 0;
                /* Pause is taken from the request unless both speed and
                 * flow-control autoneg are active, in which case the
                 * negotiated result from the firmware word is used. */
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                /* A port-type change invalidates the defaults. */
                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

                spin_unlock(&bp->phy_lock);
        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1586
1587 static int
1588 bnx2_set_remote_link(struct bnx2 *bp)
1589 {
1590         u32 evt_code;
1591
1592         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1593         switch (evt_code) {
1594                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1595                         bnx2_remote_phy_event(bp);
1596                         break;
1597                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1598                 default:
1599                         bnx2_send_heart_beat(bp);
1600                         break;
1601         }
1602         return 0;
1603 }
1604
1605 static int
1606 bnx2_setup_copper_phy(struct bnx2 *bp)
1607 {
1608         u32 bmcr;
1609         u32 new_bmcr;
1610
1611         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1612
1613         if (bp->autoneg & AUTONEG_SPEED) {
1614                 u32 adv_reg, adv1000_reg;
1615                 u32 new_adv_reg = 0;
1616                 u32 new_adv1000_reg = 0;
1617
1618                 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1619                 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1620                         ADVERTISE_PAUSE_ASYM);
1621
1622                 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1623                 adv1000_reg &= PHY_ALL_1000_SPEED;
1624
1625                 if (bp->advertising & ADVERTISED_10baseT_Half)
1626                         new_adv_reg |= ADVERTISE_10HALF;
1627                 if (bp->advertising & ADVERTISED_10baseT_Full)
1628                         new_adv_reg |= ADVERTISE_10FULL;
1629                 if (bp->advertising & ADVERTISED_100baseT_Half)
1630                         new_adv_reg |= ADVERTISE_100HALF;
1631                 if (bp->advertising & ADVERTISED_100baseT_Full)
1632                         new_adv_reg |= ADVERTISE_100FULL;
1633                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1634                         new_adv1000_reg |= ADVERTISE_1000FULL;
1635
1636                 new_adv_reg |= ADVERTISE_CSMA;
1637
1638                 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1639
1640                 if ((adv1000_reg != new_adv1000_reg) ||
1641                         (adv_reg != new_adv_reg) ||
1642                         ((bmcr & BMCR_ANENABLE) == 0)) {
1643
1644                         bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1645                         bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1646                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1647                                 BMCR_ANENABLE);
1648                 }
1649                 else if (bp->link_up) {
1650                         /* Flow ctrl may have changed from auto to forced */
1651                         /* or vice-versa. */
1652
1653                         bnx2_resolve_flow_ctrl(bp);
1654                         bnx2_set_mac_link(bp);
1655                 }
1656                 return 0;
1657         }
1658
1659         new_bmcr = 0;
1660         if (bp->req_line_speed == SPEED_100) {
1661                 new_bmcr |= BMCR_SPEED100;
1662         }
1663         if (bp->req_duplex == DUPLEX_FULL) {
1664                 new_bmcr |= BMCR_FULLDPLX;
1665         }
1666         if (new_bmcr != bmcr) {
1667                 u32 bmsr;
1668
1669                 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1670                 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1671
1672                 if (bmsr & BMSR_LSTATUS) {
1673                         /* Force link down */
1674                         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1675                         spin_unlock_bh(&bp->phy_lock);
1676                         msleep(50);
1677                         spin_lock_bh(&bp->phy_lock);
1678
1679                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1681                 }
1682
1683                 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1684
1685                 /* Normally, the new speed is setup after the link has
1686                  * gone down and up again. In some cases, link will not go
1687                  * down so we need to set up the new speed here.
1688                  */
1689                 if (bmsr & BMSR_LSTATUS) {
1690                         bp->line_speed = bp->req_line_speed;
1691                         bp->duplex = bp->req_duplex;
1692                         bnx2_resolve_flow_ctrl(bp);
1693                         bnx2_set_mac_link(bp);
1694                 }
1695         } else {
1696                 bnx2_resolve_flow_ctrl(bp);
1697                 bnx2_set_mac_link(bp);
1698         }
1699         return 0;
1700 }
1701
1702 static int
1703 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1704 {
1705         if (bp->loopback == MAC_LOOPBACK)
1706                 return 0;
1707
1708         if (bp->phy_flags & PHY_SERDES_FLAG) {
1709                 return (bnx2_setup_serdes_phy(bp, port));
1710         }
1711         else {
1712                 return (bnx2_setup_copper_phy(bp));
1713         }
1714 }
1715
1716 static int
1717 bnx2_init_5709s_phy(struct bnx2 *bp)
1718 {
1719         u32 val;
1720
1721         bp->mii_bmcr = MII_BMCR + 0x10;
1722         bp->mii_bmsr = MII_BMSR + 0x10;
1723         bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1724         bp->mii_adv = MII_ADVERTISE + 0x10;
1725         bp->mii_lpa = MII_LPA + 0x10;
1726         bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1727
1728         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1729         bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1730
1731         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1732         bnx2_reset_phy(bp);
1733
1734         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1735
1736         bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1737         val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1738         val |= MII_BNX2_SD_1000XCTL1_FIBER;
1739         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1740
1741         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1742         bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1743         if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1744                 val |= BCM5708S_UP1_2G5;
1745         else
1746                 val &= ~BCM5708S_UP1_2G5;
1747         bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1748
1749         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1750         bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1751         val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1752         bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1753
1754         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1755
1756         val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1757               MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1758         bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1759
1760         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1761
1762         return 0;
1763 }
1764
1765 static int
1766 bnx2_init_5708s_phy(struct bnx2 *bp)
1767 {
1768         u32 val;
1769
1770         bnx2_reset_phy(bp);
1771
1772         bp->mii_up1 = BCM5708S_UP1;
1773
1774         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1775         bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1776         bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1777
1778         bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1779         val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1780         bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1781
1782         bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1783         val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1784         bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1785
1786         if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1787                 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1788                 val |= BCM5708S_UP1_2G5;
1789                 bnx2_write_phy(bp, BCM5708S_UP1, val);
1790         }
1791
1792         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1793             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1794             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1795                 /* increase tx signal amplitude */
1796                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1797                                BCM5708S_BLK_ADDR_TX_MISC);
1798                 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1799                 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1800                 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1801                 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1802         }
1803
1804         val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1805               BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1806
1807         if (val) {
1808                 u32 is_backplane;
1809
1810                 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1811                                           BNX2_SHARED_HW_CFG_CONFIG);
1812                 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1813                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1814                                        BCM5708S_BLK_ADDR_TX_MISC);
1815                         bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1816                         bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1817                                        BCM5708S_BLK_ADDR_DIG);
1818                 }
1819         }
1820         return 0;
1821 }
1822
1823 static int
1824 bnx2_init_5706s_phy(struct bnx2 *bp)
1825 {
1826         bnx2_reset_phy(bp);
1827
1828         bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1829
1830         if (CHIP_NUM(bp) == CHIP_NUM_5706)
1831                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1832
1833         if (bp->dev->mtu > 1500) {
1834                 u32 val;
1835
1836                 /* Set extended packet length bit */
1837                 bnx2_write_phy(bp, 0x18, 0x7);
1838                 bnx2_read_phy(bp, 0x18, &val);
1839                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1840
1841                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1842                 bnx2_read_phy(bp, 0x1c, &val);
1843                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1844         }
1845         else {
1846                 u32 val;
1847
1848                 bnx2_write_phy(bp, 0x18, 0x7);
1849                 bnx2_read_phy(bp, 0x18, &val);
1850                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1851
1852                 bnx2_write_phy(bp, 0x1c, 0x6c00);
1853                 bnx2_read_phy(bp, 0x1c, &val);
1854                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1855         }
1856
1857         return 0;
1858 }
1859
1860 static int
1861 bnx2_init_copper_phy(struct bnx2 *bp)
1862 {
1863         u32 val;
1864
1865         bnx2_reset_phy(bp);
1866
1867         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1868                 bnx2_write_phy(bp, 0x18, 0x0c00);
1869                 bnx2_write_phy(bp, 0x17, 0x000a);
1870                 bnx2_write_phy(bp, 0x15, 0x310b);
1871                 bnx2_write_phy(bp, 0x17, 0x201f);
1872                 bnx2_write_phy(bp, 0x15, 0x9506);
1873                 bnx2_write_phy(bp, 0x17, 0x401f);
1874                 bnx2_write_phy(bp, 0x15, 0x14e2);
1875                 bnx2_write_phy(bp, 0x18, 0x0400);
1876         }
1877
1878         if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1879                 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1880                                MII_BNX2_DSP_EXPAND_REG | 0x8);
1881                 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1882                 val &= ~(1 << 8);
1883                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1884         }
1885
1886         if (bp->dev->mtu > 1500) {
1887                 /* Set extended packet length bit */
1888                 bnx2_write_phy(bp, 0x18, 0x7);
1889                 bnx2_read_phy(bp, 0x18, &val);
1890                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1891
1892                 bnx2_read_phy(bp, 0x10, &val);
1893                 bnx2_write_phy(bp, 0x10, val | 0x1);
1894         }
1895         else {
1896                 bnx2_write_phy(bp, 0x18, 0x7);
1897                 bnx2_read_phy(bp, 0x18, &val);
1898                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1899
1900                 bnx2_read_phy(bp, 0x10, &val);
1901                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1902         }
1903
1904         /* ethernet@wirespeed */
1905         bnx2_write_phy(bp, 0x18, 0x7007);
1906         bnx2_read_phy(bp, 0x18, &val);
1907         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1908         return 0;
1909 }
1910
1911
1912 static int
1913 bnx2_init_phy(struct bnx2 *bp)
1914 {
1915         u32 val;
1916         int rc = 0;
1917
1918         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1919         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1920
1921         bp->mii_bmcr = MII_BMCR;
1922         bp->mii_bmsr = MII_BMSR;
1923         bp->mii_bmsr1 = MII_BMSR;
1924         bp->mii_adv = MII_ADVERTISE;
1925         bp->mii_lpa = MII_LPA;
1926
1927         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1928
1929         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1930                 goto setup_phy;
1931
1932         bnx2_read_phy(bp, MII_PHYSID1, &val);
1933         bp->phy_id = val << 16;
1934         bnx2_read_phy(bp, MII_PHYSID2, &val);
1935         bp->phy_id |= val & 0xffff;
1936
1937         if (bp->phy_flags & PHY_SERDES_FLAG) {
1938                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1939                         rc = bnx2_init_5706s_phy(bp);
1940                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1941                         rc = bnx2_init_5708s_phy(bp);
1942                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1943                         rc = bnx2_init_5709s_phy(bp);
1944         }
1945         else {
1946                 rc = bnx2_init_copper_phy(bp);
1947         }
1948
1949 setup_phy:
1950         if (!rc)
1951                 rc = bnx2_setup_phy(bp, bp->phy_port);
1952
1953         return rc;
1954 }
1955
1956 static int
1957 bnx2_set_mac_loopback(struct bnx2 *bp)
1958 {
1959         u32 mac_mode;
1960
1961         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1962         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1963         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1964         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1965         bp->link_up = 1;
1966         return 0;
1967 }
1968
1969 static int bnx2_test_link(struct bnx2 *);
1970
1971 static int
1972 bnx2_set_phy_loopback(struct bnx2 *bp)
1973 {
1974         u32 mac_mode;
1975         int rc, i;
1976
1977         spin_lock_bh(&bp->phy_lock);
1978         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1979                             BMCR_SPEED1000);
1980         spin_unlock_bh(&bp->phy_lock);
1981         if (rc)
1982                 return rc;
1983
1984         for (i = 0; i < 10; i++) {
1985                 if (bnx2_test_link(bp) == 0)
1986                         break;
1987                 msleep(100);
1988         }
1989
1990         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1992                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1993                       BNX2_EMAC_MODE_25G_MODE);
1994
1995         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1996         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1997         bp->link_up = 1;
1998         return 0;
1999 }
2000
2001 static int
2002 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2003 {
2004         int i;
2005         u32 val;
2006
2007         bp->fw_wr_seq++;
2008         msg_data |= bp->fw_wr_seq;
2009
2010         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2011
2012         /* wait for an acknowledgement. */
2013         for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2014                 msleep(10);
2015
2016                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2017
2018                 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2019                         break;
2020         }
2021         if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2022                 return 0;
2023
2024         /* If we timed out, inform the firmware that this is the case. */
2025         if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2026                 if (!silent)
2027                         printk(KERN_ERR PFX "fw sync timeout, reset code = "
2028                                             "%x\n", msg_data);
2029
2030                 msg_data &= ~BNX2_DRV_MSG_CODE;
2031                 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2032
2033                 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2034
2035                 return -EBUSY;
2036         }
2037
2038         if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2039                 return -EIO;
2040
2041         return 0;
2042 }
2043
2044 static int
2045 bnx2_init_5709_context(struct bnx2 *bp)
2046 {
2047         int i, ret = 0;
2048         u32 val;
2049
2050         val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2051         val |= (BCM_PAGE_BITS - 8) << 16;
2052         REG_WR(bp, BNX2_CTX_COMMAND, val);
2053         for (i = 0; i < 10; i++) {
2054                 val = REG_RD(bp, BNX2_CTX_COMMAND);
2055                 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2056                         break;
2057                 udelay(2);
2058         }
2059         if (val & BNX2_CTX_COMMAND_MEM_INIT)
2060                 return -EBUSY;
2061
2062         for (i = 0; i < bp->ctx_pages; i++) {
2063                 int j;
2064
2065                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2066                        (bp->ctx_blk_mapping[i] & 0xffffffff) |
2067                        BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2068                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2069                        (u64) bp->ctx_blk_mapping[i] >> 32);
2070                 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2071                        BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2072                 for (j = 0; j < 10; j++) {
2073
2074                         val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2075                         if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2076                                 break;
2077                         udelay(5);
2078                 }
2079                 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2080                         ret = -EBUSY;
2081                         break;
2082                 }
2083         }
2084         return ret;
2085 }
2086
2087 static void
2088 bnx2_init_context(struct bnx2 *bp)
2089 {
2090         u32 vcid;
2091
2092         vcid = 96;
2093         while (vcid) {
2094                 u32 vcid_addr, pcid_addr, offset;
2095                 int i;
2096
2097                 vcid--;
2098
2099                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2100                         u32 new_vcid;
2101
2102                         vcid_addr = GET_PCID_ADDR(vcid);
2103                         if (vcid & 0x8) {
2104                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2105                         }
2106                         else {
2107                                 new_vcid = vcid;
2108                         }
2109                         pcid_addr = GET_PCID_ADDR(new_vcid);
2110                 }
2111                 else {
2112                         vcid_addr = GET_CID_ADDR(vcid);
2113                         pcid_addr = vcid_addr;
2114                 }
2115
2116                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2117                         vcid_addr += (i << PHY_CTX_SHIFT);
2118                         pcid_addr += (i << PHY_CTX_SHIFT);
2119
2120                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2121                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2122
2123                         /* Zero out the context. */
2124                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2125                                 CTX_WR(bp, vcid_addr, offset, 0);
2126                 }
2127         }
2128 }
2129
2130 static int
2131 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2132 {
2133         u16 *good_mbuf;
2134         u32 good_mbuf_cnt;
2135         u32 val;
2136
2137         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2138         if (good_mbuf == NULL) {
2139                 printk(KERN_ERR PFX "Failed to allocate memory in "
2140                                     "bnx2_alloc_bad_rbuf\n");
2141                 return -ENOMEM;
2142         }
2143
2144         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2145                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2146
2147         good_mbuf_cnt = 0;
2148
2149         /* Allocate a bunch of mbufs and save the good ones in an array. */
2150         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2151         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2152                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2153
2154                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2155
2156                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2157
2158                 /* The addresses with Bit 9 set are bad memory blocks. */
2159                 if (!(val & (1 << 9))) {
2160                         good_mbuf[good_mbuf_cnt] = (u16) val;
2161                         good_mbuf_cnt++;
2162                 }
2163
2164                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2165         }
2166
2167         /* Free the good ones back to the mbuf pool thus discarding
2168          * all the bad ones. */
2169         while (good_mbuf_cnt) {
2170                 good_mbuf_cnt--;
2171
2172                 val = good_mbuf[good_mbuf_cnt];
2173                 val = (val << 9) | val | 1;
2174
2175                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2176         }
2177         kfree(good_mbuf);
2178         return 0;
2179 }
2180
2181 static void
2182 bnx2_set_mac_addr(struct bnx2 *bp)
2183 {
2184         u32 val;
2185         u8 *mac_addr = bp->dev->dev_addr;
2186
2187         val = (mac_addr[0] << 8) | mac_addr[1];
2188
2189         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2190
2191         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2192                 (mac_addr[4] << 8) | mac_addr[5];
2193
2194         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2195 }
2196
2197 static inline int
2198 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2199 {
2200         struct sk_buff *skb;
2201         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2202         dma_addr_t mapping;
2203         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2204         unsigned long align;
2205
2206         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2207         if (skb == NULL) {
2208                 return -ENOMEM;
2209         }
2210
2211         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2212                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2213
2214         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2215                 PCI_DMA_FROMDEVICE);
2216
2217         rx_buf->skb = skb;
2218         pci_unmap_addr_set(rx_buf, mapping, mapping);
2219
2220         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2221         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2222
2223         bp->rx_prod_bseq += bp->rx_buf_use_size;
2224
2225         return 0;
2226 }
2227
2228 static int
2229 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2230 {
2231         struct status_block *sblk = bp->status_blk;
2232         u32 new_link_state, old_link_state;
2233         int is_set = 1;
2234
2235         new_link_state = sblk->status_attn_bits & event;
2236         old_link_state = sblk->status_attn_bits_ack & event;
2237         if (new_link_state != old_link_state) {
2238                 if (new_link_state)
2239                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2240                 else
2241                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2242         } else
2243                 is_set = 0;
2244
2245         return is_set;
2246 }
2247
2248 static void
2249 bnx2_phy_int(struct bnx2 *bp)
2250 {
2251         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2252                 spin_lock(&bp->phy_lock);
2253                 bnx2_set_link(bp);
2254                 spin_unlock(&bp->phy_lock);
2255         }
2256         if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2257                 bnx2_set_remote_link(bp);
2258
2259 }
2260
2261 static void
2262 bnx2_tx_int(struct bnx2 *bp)
2263 {
2264         struct status_block *sblk = bp->status_blk;
2265         u16 hw_cons, sw_cons, sw_ring_cons;
2266         int tx_free_bd = 0;
2267
2268         hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2269         if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2270                 hw_cons++;
2271         }
2272         sw_cons = bp->tx_cons;
2273
2274         while (sw_cons != hw_cons) {
2275                 struct sw_bd *tx_buf;
2276                 struct sk_buff *skb;
2277                 int i, last;
2278
2279                 sw_ring_cons = TX_RING_IDX(sw_cons);
2280
2281                 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2282                 skb = tx_buf->skb;
2283
2284                 /* partial BD completions possible with TSO packets */
2285                 if (skb_is_gso(skb)) {
2286                         u16 last_idx, last_ring_idx;
2287
2288                         last_idx = sw_cons +
2289                                 skb_shinfo(skb)->nr_frags + 1;
2290                         last_ring_idx = sw_ring_cons +
2291                                 skb_shinfo(skb)->nr_frags + 1;
2292                         if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2293                                 last_idx++;
2294                         }
2295                         if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2296                                 break;
2297                         }
2298                 }
2299
2300                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2301                         skb_headlen(skb), PCI_DMA_TODEVICE);
2302
2303                 tx_buf->skb = NULL;
2304                 last = skb_shinfo(skb)->nr_frags;
2305
2306                 for (i = 0; i < last; i++) {
2307                         sw_cons = NEXT_TX_BD(sw_cons);
2308
2309                         pci_unmap_page(bp->pdev,
2310                                 pci_unmap_addr(
2311                                         &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2312                                         mapping),
2313                                 skb_shinfo(skb)->frags[i].size,
2314                                 PCI_DMA_TODEVICE);
2315                 }
2316
2317                 sw_cons = NEXT_TX_BD(sw_cons);
2318
2319                 tx_free_bd += last + 1;
2320
2321                 dev_kfree_skb(skb);
2322
2323                 hw_cons = bp->hw_tx_cons =
2324                         sblk->status_tx_quick_consumer_index0;
2325
2326                 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2327                         hw_cons++;
2328                 }
2329         }
2330
2331         bp->tx_cons = sw_cons;
2332         /* Need to make the tx_cons update visible to bnx2_start_xmit()
2333          * before checking for netif_queue_stopped().  Without the
2334          * memory barrier, there is a small possibility that bnx2_start_xmit()
2335          * will miss it and cause the queue to be stopped forever.
2336          */
2337         smp_mb();
2338
2339         if (unlikely(netif_queue_stopped(bp->dev)) &&
2340                      (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2341                 netif_tx_lock(bp->dev);
2342                 if ((netif_queue_stopped(bp->dev)) &&
2343                     (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2344                         netif_wake_queue(bp->dev);
2345                 netif_tx_unlock(bp->dev);
2346         }
2347 }
2348
2349 static inline void
2350 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2351         u16 cons, u16 prod)
2352 {
2353         struct sw_bd *cons_rx_buf, *prod_rx_buf;
2354         struct rx_bd *cons_bd, *prod_bd;
2355
2356         cons_rx_buf = &bp->rx_buf_ring[cons];
2357         prod_rx_buf = &bp->rx_buf_ring[prod];
2358
2359         pci_dma_sync_single_for_device(bp->pdev,
2360                 pci_unmap_addr(cons_rx_buf, mapping),
2361                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2362
2363         bp->rx_prod_bseq += bp->rx_buf_use_size;
2364
2365         prod_rx_buf->skb = skb;
2366
2367         if (cons == prod)
2368                 return;
2369
2370         pci_unmap_addr_set(prod_rx_buf, mapping,
2371                         pci_unmap_addr(cons_rx_buf, mapping));
2372
2373         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2374         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2375         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2376         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2377 }
2378
2379 static int
2380 bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
2381             dma_addr_t dma_addr, u32 ring_idx)
2382 {
2383         int err;
2384         u16 prod = ring_idx & 0xffff;
2385
2386         err = bnx2_alloc_rx_skb(bp, prod);
2387         if (unlikely(err)) {
2388                 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
2389                 return err;
2390         }
2391
2392         skb_reserve(skb, bp->rx_offset);
2393         pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2394                          PCI_DMA_FROMDEVICE);
2395
2396         skb_put(skb, len);
2397         return 0;
2398 }
2399
2400 static inline u16
2401 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2402 {
2403         u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2404
2405         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2406                 cons++;
2407         return cons;
2408 }
2409
2410 static int
2411 bnx2_rx_int(struct bnx2 *bp, int budget)
2412 {
2413         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2414         struct l2_fhdr *rx_hdr;
2415         int rx_pkt = 0;
2416
2417         hw_cons = bnx2_get_hw_rx_cons(bp);
2418         sw_cons = bp->rx_cons;
2419         sw_prod = bp->rx_prod;
2420
2421         /* Memory barrier necessary as speculative reads of the rx
2422          * buffer can be ahead of the index in the status block
2423          */
2424         rmb();
2425         while (sw_cons != hw_cons) {
2426                 unsigned int len;
2427                 u32 status;
2428                 struct sw_bd *rx_buf;
2429                 struct sk_buff *skb;
2430                 dma_addr_t dma_addr;
2431
2432                 sw_ring_cons = RX_RING_IDX(sw_cons);
2433                 sw_ring_prod = RX_RING_IDX(sw_prod);
2434
2435                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2436                 skb = rx_buf->skb;
2437
2438                 rx_buf->skb = NULL;
2439
2440                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2441
2442                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2443                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2444
2445                 rx_hdr = (struct l2_fhdr *) skb->data;
2446                 len = rx_hdr->l2_fhdr_pkt_len - 4;
2447
2448                 if ((status = rx_hdr->l2_fhdr_status) &
2449                         (L2_FHDR_ERRORS_BAD_CRC |
2450                         L2_FHDR_ERRORS_PHY_DECODE |
2451                         L2_FHDR_ERRORS_ALIGNMENT |
2452                         L2_FHDR_ERRORS_TOO_SHORT |
2453                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2454
2455                         bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2456                         goto next_rx;
2457                 }
2458
2459                 if (len <= bp->rx_copy_thresh) {
2460                         struct sk_buff *new_skb;
2461
2462                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2463                         if (new_skb == NULL) {
2464                                 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2465                                                   sw_ring_prod);
2466                                 goto next_rx;
2467                         }
2468
2469                         /* aligned copy */
2470                         skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2471                                       new_skb->data, len + 2);
2472                         skb_reserve(new_skb, 2);
2473                         skb_put(new_skb, len);
2474
2475                         bnx2_reuse_rx_skb(bp, skb,
2476                                 sw_ring_cons, sw_ring_prod);
2477
2478                         skb = new_skb;
2479                 } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
2480                                     (sw_ring_cons << 16) | sw_ring_prod)))
2481                         goto next_rx;
2482
2483                 skb->protocol = eth_type_trans(skb, bp->dev);
2484
2485                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2486                         (ntohs(skb->protocol) != 0x8100)) {
2487
2488                         dev_kfree_skb(skb);
2489                         goto next_rx;
2490
2491                 }
2492
2493                 skb->ip_summed = CHECKSUM_NONE;
2494                 if (bp->rx_csum &&
2495                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2496                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2497
2498                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2499                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2500                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2501                 }
2502
2503 #ifdef BCM_VLAN
2504                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2505                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2506                                 rx_hdr->l2_fhdr_vlan_tag);
2507                 }
2508                 else
2509 #endif
2510                         netif_receive_skb(skb);
2511
2512                 bp->dev->last_rx = jiffies;
2513                 rx_pkt++;
2514
2515 next_rx:
2516                 sw_cons = NEXT_RX_BD(sw_cons);
2517                 sw_prod = NEXT_RX_BD(sw_prod);
2518
2519                 if ((rx_pkt == budget))
2520                         break;
2521
2522                 /* Refresh hw_cons to see if there is new work */
2523                 if (sw_cons == hw_cons) {
2524                         hw_cons = bnx2_get_hw_rx_cons(bp);
2525                         rmb();
2526                 }
2527         }
2528         bp->rx_cons = sw_cons;
2529         bp->rx_prod = sw_prod;
2530
2531         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2532
2533         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2534
2535         mmiowb();
2536
2537         return rx_pkt;
2538
2539 }
2540
2541 /* MSI ISR - The only difference between this and the INTx ISR
2542  * is that the MSI interrupt is always serviced.
2543  */
2544 static irqreturn_t
2545 bnx2_msi(int irq, void *dev_instance)
2546 {
2547         struct net_device *dev = dev_instance;
2548         struct bnx2 *bp = netdev_priv(dev);
2549
2550         prefetch(bp->status_blk);
2551         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2552                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2553                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2554
2555         /* Return here if interrupt is disabled. */
2556         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2557                 return IRQ_HANDLED;
2558
2559         netif_rx_schedule(dev, &bp->napi);
2560
2561         return IRQ_HANDLED;
2562 }
2563
2564 static irqreturn_t
2565 bnx2_msi_1shot(int irq, void *dev_instance)
2566 {
2567         struct net_device *dev = dev_instance;
2568         struct bnx2 *bp = netdev_priv(dev);
2569
2570         prefetch(bp->status_blk);
2571
2572         /* Return here if interrupt is disabled. */
2573         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2574                 return IRQ_HANDLED;
2575
2576         netif_rx_schedule(dev, &bp->napi);
2577
2578         return IRQ_HANDLED;
2579 }
2580
/* INTx interrupt handler (also used when MSI is not in one-shot mode).
 * Determines whether the interrupt is ours, acks and masks it, and
 * schedules NAPI polling for the actual RX/TX/attention work.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt (shared line) */

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		/* Record the status index we have seen; bnx2_poll() uses
		 * bp->last_status_idx when re-enabling interrupts.
		 */
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
2619
/* Attention status bits that the driver services: link-state change
 * and timer-abort events.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
2622
2623 static inline int
2624 bnx2_has_work(struct bnx2 *bp)
2625 {
2626         struct status_block *sblk = bp->status_blk;
2627
2628         if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2629             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2630                 return 1;
2631
2632         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2633             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2634                 return 1;
2635
2636         return 0;
2637 }
2638
/* Service one round of events reported by the status block: PHY/link
 * attention, TX completions, then RX packets.  @work_done is the RX
 * packet count accumulated so far; the updated count (bounded by
 * @budget) is returned.
 */
static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when raw and acked bits differ. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}

	if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, budget - work_done);

	return work_done;
}
2666
/* NAPI poll handler.  Loops calling bnx2_poll_work() until either the
 * budget is exhausted or no work remains, then completes NAPI and
 * re-enables interrupts via the INT_ACK_CMD register.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	int work_done = 0;
	struct status_block *sblk = bp->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bp->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bp->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bp))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & USING_MSI_FLAG)) {
				/* MSI: a single unmasking write suffices. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bp->last_status_idx);
				break;
			}
			/* INTx: write twice — first with MASK_INT still set,
			 * then without it.  NOTE(review): presumably this
			 * ordering avoids a spurious interrupt window while
			 * updating the index; confirm against the chip docs
			 * before changing.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bp->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			break;
		}
	}

	return work_done;
}
2707
2708 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2709  * from set_multicast.
2710  */
2711 static void
2712 bnx2_set_rx_mode(struct net_device *dev)
2713 {
2714         struct bnx2 *bp = netdev_priv(dev);
2715         u32 rx_mode, sort_mode;
2716         int i;
2717
2718         spin_lock_bh(&bp->phy_lock);
2719
2720         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2721                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2722         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2723 #ifdef BCM_VLAN
2724         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2725                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2726 #else
2727         if (!(bp->flags & ASF_ENABLE_FLAG))
2728                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2729 #endif
2730         if (dev->flags & IFF_PROMISC) {
2731                 /* Promiscuous mode. */
2732                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2733                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2734                              BNX2_RPM_SORT_USER0_PROM_VLAN;
2735         }
2736         else if (dev->flags & IFF_ALLMULTI) {
2737                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2738                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2739                                0xffffffff);
2740                 }
2741                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2742         }
2743         else {
2744                 /* Accept one or more multicast(s). */
2745                 struct dev_mc_list *mclist;
2746                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2747                 u32 regidx;
2748                 u32 bit;
2749                 u32 crc;
2750
2751                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2752
2753                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2754                      i++, mclist = mclist->next) {
2755
2756                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2757                         bit = crc & 0xff;
2758                         regidx = (bit & 0xe0) >> 5;
2759                         bit &= 0x1f;
2760                         mc_filter[regidx] |= (1 << bit);
2761                 }
2762
2763                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2764                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2765                                mc_filter[i]);
2766                 }
2767
2768                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2769         }
2770
2771         if (rx_mode != bp->rx_mode) {
2772                 bp->rx_mode = rx_mode;
2773                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2774         }
2775
2776         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2777         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2778         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2779
2780         spin_unlock_bh(&bp->phy_lock);
2781 }
2782
2783 static void
2784 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2785         u32 rv2p_proc)
2786 {
2787         int i;
2788         u32 val;
2789
2790
2791         for (i = 0; i < rv2p_code_len; i += 8) {
2792                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2793                 rv2p_code++;
2794                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2795                 rv2p_code++;
2796
2797                 if (rv2p_proc == RV2P_PROC1) {
2798                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2799                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2800                 }
2801                 else {
2802                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2803                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2804                 }
2805         }
2806
2807         /* Reset the processor, un-stall is done later. */
2808         if (rv2p_proc == RV2P_PROC1) {
2809                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2810         }
2811         else {
2812                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2813         }
2814 }
2815
/* Download one CPU firmware image (text/data/sbss/bss/rodata sections
 * plus the start address) into a processor's scratchpad and start it.
 * fw->text must point at a FW_BUF_SIZE scratch buffer supplied by the
 * caller (see bnx2_init_cpus()); it receives the decompressed text.
 * Returns 0 on success or a negative zlib error.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area, decompressing it into fw->text first.
	 * NOTE(review): only the text words pass through cpu_to_le32();
	 * data/rodata are written raw — presumably those arrays are
	 * stored pre-swapped in bnx2_fw.h; confirm before changing.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
2897
/* Load firmware into all on-chip processors: the two RV2P engines and
 * the RXP, TXP, TPAT, COM and (5709 only) CP CPUs.  One FW_BUF_SIZE
 * vmalloc buffer is reused as the decompression scratch area for every
 * image (passed via fw->text to load_cpu_fw()).
 * Returns 0 on success or a negative errno; on non-5709 chips the CP
 * section is skipped and the COM load result is returned.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc;
	void *text;

	/* Initialize the RV2P processor. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	/* zlib_inflate_blob() returns the decompressed length, or < 0. */
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* fw->text doubles as the shared decompression buffer. */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 family only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		fw->text = text;
		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	vfree(text);
	return rc;
}
3044
/* Transition the chip between PCI power states.
 * PCI_D0: bring the device out of low power (with the required delay
 * when leaving D3hot) and disable the magic/ACPI packet modes.
 * PCI_D3hot: optionally arm Wake-on-LAN (EMAC/RPM config, multicast
 * accept, firmware notification), then program PMCSR.
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state bits (enter D0) and clear PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* On copper, temporarily advertise only 10/100
			 * autoneg while setting up the PHY, then restore
			 * the user's settings.  NOTE(review): presumably
			 * this keeps a low-speed link for WOL — confirm.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* NOTE(review): on 5706 A0/A1 the D3hot state bits
			 * are only set when WOL is enabled — presumably a
			 * chip-rev workaround; confirm before changing.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3181
3182 static int
3183 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3184 {
3185         u32 val;
3186         int j;
3187
3188         /* Request access to the flash interface. */
3189         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3190         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3191                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3192                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3193                         break;
3194
3195                 udelay(5);
3196         }
3197
3198         if (j >= NVRAM_TIMEOUT_COUNT)
3199                 return -EBUSY;
3200
3201         return 0;
3202 }
3203
3204 static int
3205 bnx2_release_nvram_lock(struct bnx2 *bp)
3206 {
3207         int j;
3208         u32 val;
3209
3210         /* Relinquish nvram interface. */
3211         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3212
3213         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3214                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3215                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3216                         break;
3217
3218                 udelay(5);
3219         }
3220
3221         if (j >= NVRAM_TIMEOUT_COUNT)
3222                 return -EBUSY;
3223
3224         return 0;
3225 }
3226
3227
3228 static int
3229 bnx2_enable_nvram_write(struct bnx2 *bp)
3230 {
3231         u32 val;
3232
3233         val = REG_RD(bp, BNX2_MISC_CFG);
3234         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3235
3236         if (bp->flash_info->flags & BNX2_NV_WREN) {
3237                 int j;
3238
3239                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3240                 REG_WR(bp, BNX2_NVM_COMMAND,
3241                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3242
3243                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3244                         udelay(5);
3245
3246                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3247                         if (val & BNX2_NVM_COMMAND_DONE)
3248                                 break;
3249                 }
3250
3251                 if (j >= NVRAM_TIMEOUT_COUNT)
3252                         return -EBUSY;
3253         }
3254         return 0;
3255 }
3256
3257 static void
3258 bnx2_disable_nvram_write(struct bnx2 *bp)
3259 {
3260         u32 val;
3261
3262         val = REG_RD(bp, BNX2_MISC_CFG);
3263         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3264 }
3265
3266
3267 static void
3268 bnx2_enable_nvram_access(struct bnx2 *bp)
3269 {
3270         u32 val;
3271
3272         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3273         /* Enable both bits, even on read. */
3274         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3275                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3276 }
3277
3278 static void
3279 bnx2_disable_nvram_access(struct bnx2 *bp)
3280 {
3281         u32 val;
3282
3283         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3284         /* Disable both bits, even after read. */
3285         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3286                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3287                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3288 }
3289
3290 static int
3291 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3292 {
3293         u32 cmd;
3294         int j;
3295
3296         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3297                 /* Buffered flash, no erase needed */
3298                 return 0;
3299
3300         /* Build an erase command */
3301         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3302               BNX2_NVM_COMMAND_DOIT;
3303
3304         /* Need to clear DONE bit separately. */
3305         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3306
3307         /* Address of the NVRAM to read from. */
3308         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3309
3310         /* Issue an erase command. */
3311         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3312
3313         /* Wait for completion. */
3314         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3315                 u32 val;
3316
3317                 udelay(5);
3318
3319                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3320                 if (val & BNX2_NVM_COMMAND_DONE)
3321                         break;
3322         }
3323
3324         if (j >= NVRAM_TIMEOUT_COUNT)
3325                 return -EBUSY;
3326
3327         return 0;
3328 }
3329
3330 static int
3331 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3332 {
3333         u32 cmd;
3334         int j;
3335
3336         /* Build the command word. */
3337         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3338
3339         /* Calculate an offset of a buffered flash, not needed for 5709. */
3340         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3341                 offset = ((offset / bp->flash_info->page_size) <<
3342                            bp->flash_info->page_bits) +
3343                           (offset % bp->flash_info->page_size);
3344         }
3345
3346         /* Need to clear DONE bit separately. */
3347         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3348
3349         /* Address of the NVRAM to read from. */
3350         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3351
3352         /* Issue a read command. */
3353         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3354
3355         /* Wait for completion. */
3356         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3357                 u32 val;
3358
3359                 udelay(5);
3360
3361                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3362                 if (val & BNX2_NVM_COMMAND_DONE) {
3363                         val = REG_RD(bp, BNX2_NVM_READ);
3364
3365                         val = be32_to_cpu(val);
3366                         memcpy(ret_val, &val, 4);
3367                         break;
3368                 }
3369         }
3370         if (j >= NVRAM_TIMEOUT_COUNT)
3371                 return -EBUSY;
3372
3373         return 0;
3374 }
3375
3376
3377 static int
3378 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3379 {
3380         u32 cmd, val32;
3381         int j;
3382
3383         /* Build the command word. */
3384         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3385
3386         /* Calculate an offset of a buffered flash, not needed for 5709. */
3387         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3388                 offset = ((offset / bp->flash_info->page_size) <<
3389                           bp->flash_info->page_bits) +
3390                          (offset % bp->flash_info->page_size);
3391         }
3392
3393         /* Need to clear DONE bit separately. */
3394         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3395
3396         memcpy(&val32, val, 4);
3397         val32 = cpu_to_be32(val32);
3398
3399         /* Write the data. */
3400         REG_WR(bp, BNX2_NVM_WRITE, val32);
3401
3402         /* Address of the NVRAM to write to. */
3403         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3404
3405         /* Issue the write command. */
3406         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3407
3408         /* Wait for completion. */
3409         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3410                 udelay(5);
3411
3412                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3413                         break;
3414         }
3415         if (j >= NVRAM_TIMEOUT_COUNT)
3416                 return -EBUSY;
3417
3418         return 0;
3419 }
3420
/*
 * Identify the attached flash/EEPROM device and record it in
 * bp->flash_info and bp->flash_size.  On the 5709 the part is always
 * flash_5709; on older chips the strapping value read from NVM_CFG1 is
 * matched against flash_table[], and the flash interface registers are
 * programmed for the matched part if the interface has not already
 * been reconfigured.  Returns 0 on success, -ENODEV if no table entry
 * matches, or the error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a fixed flash spec; skip the strap-based detection. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set means the interface has already been reconfigured;
	 * match on config1 instead of the raw strapping value. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strapping pins. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop found a match: the scan ran to completion. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size reported in shared hardware config; fall
	 * back to the table's total_size when that field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3503
3504 static int
3505 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3506                 int buf_size)
3507 {
3508         int rc = 0;
3509         u32 cmd_flags, offset32, len32, extra;
3510
3511         if (buf_size == 0)
3512                 return 0;
3513
3514         /* Request access to the flash interface. */
3515         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3516                 return rc;
3517
3518         /* Enable access to flash interface */
3519         bnx2_enable_nvram_access(bp);
3520
3521         len32 = buf_size;
3522         offset32 = offset;
3523         extra = 0;
3524
3525         cmd_flags = 0;
3526
3527         if (offset32 & 3) {
3528                 u8 buf[4];
3529                 u32 pre_len;
3530
3531                 offset32 &= ~3;
3532                 pre_len = 4 - (offset & 3);
3533
3534                 if (pre_len >= len32) {
3535                         pre_len = len32;
3536                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3537                                     BNX2_NVM_COMMAND_LAST;
3538                 }
3539                 else {
3540                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3541                 }
3542
3543                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3544
3545                 if (rc)
3546                         return rc;
3547
3548                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3549
3550                 offset32 += 4;
3551                 ret_buf += pre_len;
3552                 len32 -= pre_len;
3553         }
3554         if (len32 & 3) {
3555                 extra = 4 - (len32 & 3);
3556                 len32 = (len32 + 4) & ~3;
3557         }
3558
3559         if (len32 == 4) {
3560                 u8 buf[4];
3561
3562                 if (cmd_flags)
3563                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3564                 else
3565                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3566                                     BNX2_NVM_COMMAND_LAST;
3567
3568                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3569
3570                 memcpy(ret_buf, buf, 4 - extra);
3571         }
3572         else if (len32 > 0) {
3573                 u8 buf[4];
3574
3575                 /* Read the first word. */
3576                 if (cmd_flags)
3577                         cmd_flags = 0;
3578                 else
3579                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3580
3581                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3582
3583                 /* Advance to the next dword. */
3584                 offset32 += 4;
3585                 ret_buf += 4;
3586                 len32 -= 4;
3587
3588                 while (len32 > 4 && rc == 0) {
3589                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3590
3591                         /* Advance to the next dword. */
3592                         offset32 += 4;
3593                         ret_buf += 4;
3594                         len32 -= 4;
3595                 }
3596
3597                 if (rc)
3598                         return rc;
3599
3600                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3601                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3602
3603                 memcpy(ret_buf, buf, 4 - extra);
3604         }
3605
3606         /* Disable access to flash interface */
3607         bnx2_disable_nvram_access(bp);
3608
3609         bnx2_release_nvram_lock(bp);
3610
3611         return rc;
3612 }
3613
/*
 * Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned starts/ends are handled read-modify-write: the partial
 * dwords at either edge are pre-read into start[]/end[] and merged
 * with the caller's data in a temporary align_buf.  For non-buffered
 * flash, each affected page is read into flash_buffer, erased, and
 * rewritten with the merged contents.  The NVRAM lock is acquired and
 * released around each page.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths that jump to nvram_write_end from inside
 * the per-page loop (after bnx2_acquire_nvram_lock succeeded) do not
 * release the NVRAM lock or disable flash access/write — verify
 * whether this is recovered elsewhere before relying on it.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: pre-read the first dword so its leading
	 * bytes can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: pre-read the last dword likewise. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the preserved edge bytes and the caller's data into one
	 * dword-aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page bounce buffer; 264 bytes is
	 * presumably the largest page_size in flash_table — confirm. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process the write one flash page at a time. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST flag on the final dword of the page, or of
			 * the data for buffered flash. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3793
3794 static void
3795 bnx2_init_remote_phy(struct bnx2 *bp)
3796 {
3797         u32 val;
3798
3799         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3800         if (!(bp->phy_flags & PHY_SERDES_FLAG))
3801                 return;
3802
3803         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3804         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3805                 return;
3806
3807         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3808                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3809
3810                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3811                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3812                         bp->phy_port = PORT_FIBRE;
3813                 else
3814                         bp->phy_port = PORT_TP;
3815
3816                 if (netif_running(bp->dev)) {
3817                         u32 sig;
3818
3819                         if (val & BNX2_LINK_STATUS_LINK_UP) {
3820                                 bp->link_up = 1;
3821                                 netif_carrier_on(bp->dev);
3822                         } else {
3823                                 bp->link_up = 0;
3824                                 netif_carrier_off(bp->dev);
3825                         }
3826                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3827                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3828                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3829                                    sig);
3830                 }
3831         }
3832 }
3833
/*
 * Soft-reset the chip and wait for the bootcode to re-initialize.
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason forwarded to the
 * firmware before and after the reset.  Returns 0 on success, -EBUSY
 * if the reset never completes, -ENODEV if byte swapping comes up
 * wrong, or an error from the firmware sync / bad-rbuf workaround.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via the MISC command register, then restore
		 * the register window / word-swap configuration. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips: request a core reset through the PCI config
		 * register and poll until the request/busy bits clear. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY capability; the reset may have changed the
	 * reported PHY port type. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3935
/*
 * Program the chip's core blocks after a reset: DMA configuration,
 * context memory, on-chip CPU firmware, MQ/RV2P/TBDR setup, MTU,
 * host-coalescing parameters, and the receive filter.  Finishes by
 * telling the firmware that initialization is complete and enabling
 * the default set of blocks.  Returns 0 or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Assemble the DMA byte/word-swap configuration and channel
	 * counts (read channels in bits 12+, write channels in 16+). */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA setting for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear the PCI-X relaxed-ordering enable bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	/* Kernel mailbox window. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* RV2P page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Point the chip at the DMA-mapped status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing thresholds/timers; the *_int variants go in
	 * the upper 16 bits of each register. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4113
4114 static void
4115 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4116 {
4117         u32 val, offset0, offset1, offset2, offset3;
4118
4119         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4120                 offset0 = BNX2_L2CTX_TYPE_XI;
4121                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4122                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4123                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4124         } else {
4125                 offset0 = BNX2_L2CTX_TYPE;
4126                 offset1 = BNX2_L2CTX_CMD_TYPE;
4127                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4128                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4129         }
4130         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4131         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4132
4133         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4134         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4135
4136         val = (u64) bp->tx_desc_mapping >> 32;
4137         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4138
4139         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4140         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4141 }
4142
4143 static void
4144 bnx2_init_tx_ring(struct bnx2 *bp)
4145 {
4146         struct tx_bd *txbd;
4147         u32 cid;
4148
4149         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4150
4151         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4152
4153         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4154         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4155
4156         bp->tx_prod = 0;
4157         bp->tx_cons = 0;
4158         bp->hw_tx_cons = 0;
4159         bp->tx_prod_bseq = 0;
4160
4161         cid = TX_CID;
4162         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4163         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4164
4165         bnx2_init_tx_context(bp, cid);
4166 }
4167
4168 static void
4169 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4170                      int num_rings)
4171 {
4172         int i;
4173         struct rx_bd *rxbd;
4174
4175         for (i = 0; i < num_rings; i++) {
4176                 int j;
4177
4178                 rxbd = &rx_ring[i][0];
4179                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4180                         rxbd->rx_bd_len = buf_size;
4181                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4182                 }
4183                 if (i == (num_rings - 1))
4184                         j = 0;
4185                 else
4186                         j = i + 1;
4187                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4188                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4189         }
4190 }
4191
/* Reset RX state, build the RX BD chain, program the RX context in the
 * chip, and pre-fill the ring with receive buffers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

	bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Zero the page-buffer size field (no separate page ring is
	 * configured by this setup).
	 */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* Point the chip at the first page of the BD chain. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post skbs to the ring; stop early on allocation failure (the
	 * ring then simply runs with fewer buffers).
	 */
	ring_prod = prod = bp->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4233
4234 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4235 {
4236         u32 max, num_rings = 1;
4237
4238         while (ring_size > MAX_RX_DESC_CNT) {
4239                 ring_size -= MAX_RX_DESC_CNT;
4240                 num_rings++;
4241         }
4242         /* round to next power of 2 */
4243         max = max_size;
4244         while ((max & num_rings) == 0)
4245                 max >>= 1;
4246
4247         if (num_rings != max)
4248                 max <<= 1;
4249
4250         return max;
4251 }
4252
4253 static void
4254 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4255 {
4256         u32 rx_size;
4257
4258         /* 8 for CRC and VLAN */
4259         rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4260
4261         bp->rx_copy_thresh = RX_COPY_THRESH;
4262
4263         bp->rx_buf_use_size = rx_size;
4264         /* hw alignment */
4265         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4266         bp->rx_ring_size = size;
4267         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4268         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4269 }
4270
/* Unmap and free every sk_buff still owned by the TX ring.  A packet
 * occupies one BD for the linear head plus one BD per paged fragment;
 * the index is advanced past the whole group at once.  Safe to call
 * when the ring was never allocated.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* The head BD maps the linear portion of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Subsequent BDs map the paged fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip the head BD and all of its fragment BDs. */
		i += j + 1;
	}

}
4307
4308 static void
4309 bnx2_free_rx_skbs(struct bnx2 *bp)
4310 {
4311         int i;
4312
4313         if (bp->rx_buf_ring == NULL)
4314                 return;
4315
4316         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4317                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4318                 struct sk_buff *skb = rx_buf->skb;
4319
4320                 if (skb == NULL)
4321                         continue;
4322
4323                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4324                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4325
4326                 rx_buf->skb = NULL;
4327
4328                 dev_kfree_skb(skb);
4329         }
4330 }
4331
/* Release all socket buffers held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4338
4339 static int
4340 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4341 {
4342         int rc;
4343
4344         rc = bnx2_reset_chip(bp, reset_code);
4345         bnx2_free_skbs(bp);
4346         if (rc)
4347                 return rc;
4348
4349         if ((rc = bnx2_init_chip(bp)) != 0)
4350                 return rc;
4351
4352         bnx2_init_tx_ring(bp);
4353         bnx2_init_rx_ring(bp);
4354         return 0;
4355 }
4356
4357 static int
4358 bnx2_init_nic(struct bnx2 *bp)
4359 {
4360         int rc;
4361
4362         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4363                 return rc;
4364
4365         spin_lock_bh(&bp->phy_lock);
4366         bnx2_init_phy(bp);
4367         bnx2_set_link(bp);
4368         spin_unlock_bh(&bp->phy_lock);
4369         return 0;
4370 }
4371
/* Ethtool register self-test.  For each entry in reg_tbl, verify that
 * the read/write bits (rw_mask) can be cleared and set, and that the
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in every case.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Returns 0 on success
 * or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel: terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must read back 0, ro bits
		 * must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back 1, ro bits
		 * must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4542
4543 static int
4544 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4545 {
4546         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4547                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4548         int i;
4549
4550         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4551                 u32 offset;
4552
4553                 for (offset = 0; offset < size; offset += 4) {
4554
4555                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4556
4557                         if (REG_RD_IND(bp, start + offset) !=
4558                                 test_pattern[i]) {
4559                                 return -ENODEV;
4560                         }
4561                 }
4562         }
4563         return 0;
4564 }
4565
4566 static int
4567 bnx2_test_memory(struct bnx2 *bp)
4568 {
4569         int ret = 0;
4570         int i;
4571         static struct mem_entry {
4572                 u32   offset;
4573                 u32   len;
4574         } mem_tbl_5706[] = {
4575                 { 0x60000,  0x4000 },
4576                 { 0xa0000,  0x3000 },
4577                 { 0xe0000,  0x4000 },
4578                 { 0x120000, 0x4000 },
4579                 { 0x1a0000, 0x4000 },
4580                 { 0x160000, 0x4000 },
4581                 { 0xffffffff, 0    },
4582         },
4583         mem_tbl_5709[] = {
4584                 { 0x60000,  0x4000 },
4585                 { 0xa0000,  0x3000 },
4586                 { 0xe0000,  0x4000 },
4587                 { 0x120000, 0x4000 },
4588                 { 0x1a0000, 0x4000 },
4589                 { 0xffffffff, 0    },
4590         };
4591         struct mem_entry *mem_tbl;
4592
4593         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4594                 mem_tbl = mem_tbl_5709;
4595         else
4596                 mem_tbl = mem_tbl_5706;
4597
4598         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4599                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4600                         mem_tbl[i].len)) != 0) {
4601                         return ret;
4602                 }
4603         }
4604
4605         return ret;
4606 }
4607
4608 #define BNX2_MAC_LOOPBACK       0
4609 #define BNX2_PHY_LOOPBACK       1
4610
/* Run one loopback self-test: transmit a single 1514-byte test frame
 * in MAC or PHY loopback mode and verify it is received back intact.
 * Returns 0 on success, -ENODEV if the frame is lost or corrupted,
 * -EINVAL on an unknown mode, -ENOMEM if the skb cannot be allocated.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Remote-PHY configurations cannot do PHY loopback;
		 * report success so the overall test is not failed.
		 */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our MAC as destination, zero padding,
	 * then a recognizable byte pattern in the payload.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce event so the status block is current before
	 * sampling the RX consumer index.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post a single TX BD describing the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the doorbell, then give the chip time to loop the frame. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX consumer must have caught up with the producer. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have arrived on the RX ring. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The frame header written by the chip precedes the payload. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length minus 4-byte CRC must match. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4732
4733 #define BNX2_MAC_LOOPBACK_FAILED        1
4734 #define BNX2_PHY_LOOPBACK_FAILED        2
4735 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4736                                          BNX2_PHY_LOOPBACK_FAILED)
4737
4738 static int
4739 bnx2_test_loopback(struct bnx2 *bp)
4740 {
4741         int rc = 0;
4742
4743         if (!netif_running(bp->dev))
4744                 return BNX2_LOOPBACK_FAILED;
4745
4746         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4747         spin_lock_bh(&bp->phy_lock);
4748         bnx2_init_phy(bp);
4749         spin_unlock_bh(&bp->phy_lock);
4750         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4751                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4752         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4753                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4754         return rc;
4755 }
4756
4757 #define NVRAM_SIZE 0x200
4758 #define CRC32_RESIDUAL 0xdebb20e3
4759
4760 static int
4761 bnx2_test_nvram(struct bnx2 *bp)
4762 {
4763         u32 buf[NVRAM_SIZE / 4];
4764         u8 *data = (u8 *) buf;
4765         int rc = 0;
4766         u32 magic, csum;
4767
4768         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4769                 goto test_nvram_done;
4770
4771         magic = be32_to_cpu(buf[0]);
4772         if (magic != 0x669955aa) {
4773                 rc = -ENODEV;
4774                 goto test_nvram_done;
4775         }
4776
4777         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4778                 goto test_nvram_done;
4779
4780         csum = ether_crc_le(0x100, data);
4781         if (csum != CRC32_RESIDUAL) {
4782                 rc = -ENODEV;
4783                 goto test_nvram_done;
4784         }
4785
4786         csum = ether_crc_le(0x100, data + 0x100);
4787         if (csum != CRC32_RESIDUAL) {
4788                 rc = -ENODEV;
4789         }
4790
4791 test_nvram_done:
4792         return rc;
4793 }
4794
4795 static int
4796 bnx2_test_link(struct bnx2 *bp)
4797 {
4798         u32 bmsr;
4799
4800         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4801                 if (bp->link_up)
4802                         return 0;
4803                 return -ENODEV;
4804         }
4805         spin_lock_bh(&bp->phy_lock);
4806         bnx2_enable_bmsr1(bp);
4807         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4808         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4809         bnx2_disable_bmsr1(bp);
4810         spin_unlock_bh(&bp->phy_lock);
4811
4812         if (bmsr & BMSR_LSTATUS) {
4813                 return 0;
4814         }
4815         return -ENODEV;
4816 }
4817
4818 static int
4819 bnx2_test_intr(struct bnx2 *bp)
4820 {
4821         int i;
4822         u16 status_idx;
4823
4824         if (!netif_running(bp->dev))
4825                 return -ENODEV;
4826
4827         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4828
4829         /* This register is not touched during run-time. */
4830         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4831         REG_RD(bp, BNX2_HC_COMMAND);
4832
4833         for (i = 0; i < 10; i++) {
4834                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4835                         status_idx) {
4836
4837                         break;
4838                 }
4839
4840                 msleep_interruptible(10);
4841         }
4842         if (i < 10)
4843                 return 0;
4844
4845         return -ENODEV;
4846 }
4847
/* Periodic SerDes handling for the 5706.  When autoneg has not
 * completed, fall back to forced 1G full-duplex via parallel detect;
 * when a link partner config later appears, re-enable autoneg.
 * Called from the driver timer with BH context; takes phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Vendor-specific shadow registers (0x1c/0x17/
			 * 0x15); phy2 is read twice, presumably to clear
			 * a latched value first -- TODO confirm against
			 * the PHY datasheet.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Signal present but no autoneg config
				 * from the partner: force 1G full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up in forced mode; if the partner now sends
		 * a config, switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4902
/* Periodic SerDes handling for the 5708.  While autoneg has not
 * brought the link up, alternate between forced 2.5G and autoneg
 * modes.  Not applicable with a remote PHY or without 2.5G support.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		/* Toggle between forced 2.5G and autoneg each time the
		 * timer fires without link.
		 */
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4935
/* Driver heartbeat timer: sends the firmware keep-alive, refreshes the
 * firmware RX drop counter, applies the 5708 statistics workaround,
 * and runs the per-chip SerDes state machine.  Reschedules itself at
 * bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the body while interrupts are disabled (e.g. during a
	 * reset), but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4966
4967 static int
4968 bnx2_request_irq(struct bnx2 *bp)
4969 {
4970         struct net_device *dev = bp->dev;
4971         int rc = 0;
4972
4973         if (bp->flags & USING_MSI_FLAG) {
4974                 irq_handler_t   fn = bnx2_msi;
4975
4976                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4977                         fn = bnx2_msi_1shot;
4978
4979                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4980         } else
4981                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4982                                  IRQF_SHARED, dev->name, dev);
4983         return rc;
4984 }
4985
4986 static void
4987 bnx2_free_irq(struct bnx2 *bp)
4988 {
4989         struct net_device *dev = bp->dev;
4990
4991         if (bp->flags & USING_MSI_FLAG) {
4992                 free_irq(bp->pdev->irq, dev);
4993                 pci_disable_msi(bp->pdev);
4994                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4995         } else
4996                 free_irq(bp->pdev->irq, dev);
4997 }
4998
/* net_device open hook.  Called with rtnl_lock.
 *
 * Brings the device up: power to D0, allocate rings/status memory,
 * enable NAPI, hook the IRQ (preferring MSI when capable and not
 * disabled by the module parameter), program the chip, and finally
 * verify that MSI actually delivers an interrupt before committing
 * to it.  Each failure point unwinds exactly what was set up so far.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	napi_enable(&bp->napi);

	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			/* 5709 additionally supports one-shot MSI. */
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			/* bnx2_free_irq() also disables MSI and clears
			 * the MSI flags, so the retry below uses INTx.
			 */
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				napi_disable(&bp->napi);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5085
/* Work item (bp->reset_task) that resets and reinitializes the chip
 * from process context; scheduled by bnx2_tx_timeout().
 *
 * in_reset_task is polled by bnx2_close(), which cannot use
 * flush_scheduled_work() while holding rtnl_lock (see the comment
 * there), so the flag must bracket the whole reset.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The device may have been closed after the work was queued. */
	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set to 1 here, unlike the 0 in
	 * bnx2_open(); presumably bnx2_netif_start() decrements or
	 * re-arms it -- confirm against that helper.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5103
5104 static void
5105 bnx2_tx_timeout(struct net_device *dev)
5106 {
5107         struct bnx2 *bp = netdev_priv(dev);
5108
5109         /* This allows the netif to be shutdown gracefully before resetting */
5110         schedule_work(&bp->reset_task);
5111 }
5112
#ifdef BCM_VLAN
/* Called with rtnl_lock.
 * Install the new VLAN group pointer and reprogram the RX filters,
 * with the device quiesced for the duration of the update.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5128
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: DMA-map the skb (linear part plus page frags),
 * build one TX buffer descriptor (BD) per piece, and ring the TX
 * doorbell.  Checksum offload, VLAN insertion and LSO/TSO (IPv4 and
 * IPv6) are encoded into the vlan_tag_flags/mss fields of the BDs.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring got this
	 * full (see the check at the bottom), so this path is a bug.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	/* The VLAN tag rides in the upper 16 bits of the flags word. */
	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: the TCP header offset beyond
			 * the fixed IPv6 header is split across several
			 * BD flag/mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO rewrites the IP/TCP headers below,
			 * so a cloned header must be unshared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed tot_len and a pseudo-header-only TCP
			 * checksum (csum_tcpudp_magic with len 0) for
			 * the per-segment fixups.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* IP-option + TCP-option length, in 32-bit
			 * words, in bits 8+ of the flags word.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	/* First BD: the linear part of the skb. */
	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue if a maximally-fragmented skb no longer fits,
	 * then re-check in case bnx2_tx_int() freed descriptors
	 * concurrently and would otherwise leave the queue stopped.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5267
/* net_device stop hook.  Called with rtnl_lock.
 *
 * Quiesce interrupts, NAPI and the timer, hand the chip back to the
 * firmware with a WOL-appropriate reset code, then release the IRQ,
 * SKBs and ring memory and drop the device to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	napi_disable(&bp->napi);
	del_timer_sync(&bp->timer);
	/* Tell the firmware why we are going down so it can leave the
	 * link in the right state for wake-on-LAN.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5300
/* Fold a 64-bit hardware statistics counter, exposed as separate
 * ctr_hi/ctr_lo 32-bit words, into an unsigned long.  On 64-bit
 * kernels both halves are combined; on 32-bit kernels only the low
 * word fits (matching the width of the net_device_stats fields these
 * feed).  The whole expansion is parenthesized so the macro behaves
 * as a single term inside larger expressions (the old form expanded
 * to an unparenthesized `a + b`).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5313
/* net_device get_stats hook: translate the firmware-maintained
 * statistics block into struct net_device_stats.  64-bit hardware
 * counters are folded with GET_NET_STATS (high word dropped on
 * 32-bit kernels).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* No stats block yet -- return the last cached values. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are reported as zero on 5706 and on
	 * 5708 A0.  NOTE(review): presumably the counter is unreliable
	 * on those chips -- confirm against the hardware errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5389
5390 /* All ethtool functions called with rtnl_lock */
5391
/* ethtool get_settings: report the supported modes for the media type
 * (serdes/copper, or both behind a remote PHY), the advertised modes,
 * and the current autoneg/speed/duplex state.  phy_lock is held while
 * the live link fields are sampled.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote PHY can present either media type. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
5450
/* ethtool set_settings: validate the requested port/autoneg/speed
 * combination against the PHY capabilities, then reprogram the PHY.
 * Link parameters are staged in locals so nothing in *bp is modified
 * unless validation succeeds.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Changing media type is only allowed with a remote PHY. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 single-speed is rejected on fibre. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise every speed the
			 * requested port type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex: fibre accepts only 1000/2500
		 * full duplex (2500 only on capable PHYs); copper
		 * rejects forced gigabit speeds.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the staged values and apply. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5533
5534 static void
5535 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5536 {
5537         struct bnx2 *bp = netdev_priv(dev);
5538
5539         strcpy(info->driver, DRV_MODULE_NAME);
5540         strcpy(info->version, DRV_MODULE_VERSION);
5541         strcpy(info->bus_info, pci_name(bp->pdev));
5542         strcpy(info->fw_version, bp->fw_version);
5543 }
5544
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool hook: tell the core how big a buffer get_regs needs. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5552
5553 static void
5554 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5555 {
5556         u32 *p = _p, i, offset;
5557         u8 *orig_p = _p;
5558         struct bnx2 *bp = netdev_priv(dev);
5559         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5560                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5561                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5562                                  0x1040, 0x1048, 0x1080, 0x10a4,
5563                                  0x1400, 0x1490, 0x1498, 0x14f0,
5564                                  0x1500, 0x155c, 0x1580, 0x15dc,
5565                                  0x1600, 0x1658, 0x1680, 0x16d8,
5566                                  0x1800, 0x1820, 0x1840, 0x1854,
5567                                  0x1880, 0x1894, 0x1900, 0x1984,
5568                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5569                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5570                                  0x2000, 0x2030, 0x23c0, 0x2400,
5571                                  0x2800, 0x2820, 0x2830, 0x2850,
5572                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5573                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5574                                  0x4080, 0x4090, 0x43c0, 0x4458,
5575                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5576                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5577                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5578                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5579                                  0x6800, 0x6848, 0x684c, 0x6860,
5580                                  0x6888, 0x6910, 0x8000 };
5581
5582         regs->version = 0;
5583
5584         memset(p, 0, BNX2_REGDUMP_LEN);
5585
5586         if (!netif_running(bp->dev))
5587                 return;
5588
5589         i = 0;
5590         offset = reg_boundaries[0];
5591         p += offset;
5592         while (offset < BNX2_REGDUMP_LEN) {
5593                 *p++ = REG_RD(bp, offset);
5594                 offset += 4;
5595                 if (offset == reg_boundaries[i + 1]) {
5596                         offset = reg_boundaries[i + 2];
5597                         p = (u32 *) (orig_p + offset);
5598                         i += 2;
5599                 }
5600         }
5601 }
5602
5603 static void
5604 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5605 {
5606         struct bnx2 *bp = netdev_priv(dev);
5607
5608         if (bp->flags & NO_WOL_FLAG) {
5609                 wol->supported = 0;
5610                 wol->wolopts = 0;
5611         }
5612         else {
5613                 wol->supported = WAKE_MAGIC;
5614                 if (bp->wol)
5615                         wol->wolopts = WAKE_MAGIC;
5616                 else
5617                         wol->wolopts = 0;
5618         }
5619         memset(&wol->sopass, 0, sizeof(wol->sopass));
5620 }
5621
5622 static int
5623 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5624 {
5625         struct bnx2 *bp = netdev_priv(dev);
5626
5627         if (wol->wolopts & ~WAKE_MAGIC)
5628                 return -EINVAL;
5629
5630         if (wol->wolopts & WAKE_MAGIC) {
5631                 if (bp->flags & NO_WOL_FLAG)
5632                         return -EINVAL;
5633
5634                 bp->wol = 1;
5635         }
5636         else {
5637                 bp->wol = 0;
5638         }
5639         return 0;
5640 }
5641
/* ethtool nway_reset: restart autonegotiation.  Valid only when
 * autoneg is enabled.  A remote PHY request is forwarded to the
 * firmware; a local serdes PHY is first forced into loopback so the
 * peer sees the link drop before renegotiation starts.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; it is not allowed to
		 * msleep() under a BH-disabled spinlock.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5684
5685 static int
5686 bnx2_get_eeprom_len(struct net_device *dev)
5687 {
5688         struct bnx2 *bp = netdev_priv(dev);
5689
5690         if (bp->flash_info == NULL)
5691                 return 0;
5692
5693         return (int) bp->flash_size;
5694 }
5695
5696 static int
5697 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5698                 u8 *eebuf)
5699 {
5700         struct bnx2 *bp = netdev_priv(dev);
5701         int rc;
5702
5703         /* parameters already validated in ethtool_get_eeprom */
5704
5705         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5706
5707         return rc;
5708 }
5709
5710 static int
5711 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5712                 u8 *eebuf)
5713 {
5714         struct bnx2 *bp = netdev_priv(dev);
5715         int rc;
5716
5717         /* parameters already validated in ethtool_set_eeprom */
5718
5719         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5720
5721         return rc;
5722 }
5723
5724 static int
5725 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5726 {
5727         struct bnx2 *bp = netdev_priv(dev);
5728
5729         memset(coal, 0, sizeof(struct ethtool_coalesce));
5730
5731         coal->rx_coalesce_usecs = bp->rx_ticks;
5732         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5733         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5734         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5735
5736         coal->tx_coalesce_usecs = bp->tx_ticks;
5737         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5738         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5739         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5740
5741         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5742
5743         return 0;
5744 }
5745
5746 static int
5747 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5748 {
5749         struct bnx2 *bp = netdev_priv(dev);
5750
5751         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5752         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5753
5754         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5755         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5756
5757         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5758         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5759
5760         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5761         if (bp->rx_quick_cons_trip_int > 0xff)
5762                 bp->rx_quick_cons_trip_int = 0xff;
5763
5764         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5765         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5766
5767         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5768         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5769
5770         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5771         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5772
5773         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5774         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5775                 0xff;
5776
5777         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5778         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5779                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5780                         bp->stats_ticks = USEC_PER_SEC;
5781         }
5782         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5783                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5784         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5785
5786         if (netif_running(bp->dev)) {
5787                 bnx2_netif_stop(bp);
5788                 bnx2_init_nic(bp);
5789                 bnx2_netif_start(bp);
5790         }
5791
5792         return 0;
5793 }
5794
5795 static void
5796 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5797 {
5798         struct bnx2 *bp = netdev_priv(dev);
5799
5800         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5801         ering->rx_mini_max_pending = 0;
5802         ering->rx_jumbo_max_pending = 0;
5803
5804         ering->rx_pending = bp->rx_ring_size;
5805         ering->rx_mini_pending = 0;
5806         ering->rx_jumbo_pending = 0;
5807
5808         ering->tx_max_pending = MAX_TX_DESC_CNT;
5809         ering->tx_pending = bp->tx_ring_size;
5810 }
5811
/* Resize the RX/TX rings.  If the interface is running it is fully
 * torn down (chip reset, SKBs and ring memory freed), the new sizes
 * recorded, and the NIC reallocated and restarted.
 *
 * NOTE(review): if bnx2_alloc_mem() fails here the interface is left
 * stopped with its rings freed while netif_running() still reports
 * true -- confirm callers can cope with that state.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
5836
5837 static int
5838 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5839 {
5840         struct bnx2 *bp = netdev_priv(dev);
5841         int rc;
5842
5843         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5844                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5845                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5846
5847                 return -EINVAL;
5848         }
5849         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
5850         return rc;
5851 }
5852
5853 static void
5854 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5855 {
5856         struct bnx2 *bp = netdev_priv(dev);
5857
5858         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5859         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5860         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5861 }
5862
5863 static int
5864 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5865 {
5866         struct bnx2 *bp = netdev_priv(dev);
5867
5868         bp->req_flow_ctrl = 0;
5869         if (epause->rx_pause)
5870                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5871         if (epause->tx_pause)
5872                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5873
5874         if (epause->autoneg) {
5875                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5876         }
5877         else {
5878                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5879         }
5880
5881         spin_lock_bh(&bp->phy_lock);
5882
5883         bnx2_setup_phy(bp, bp->phy_port);
5884
5885         spin_unlock_bh(&bp->phy_lock);
5886
5887         return 0;
5888 }
5889
5890 static u32
5891 bnx2_get_rx_csum(struct net_device *dev)
5892 {
5893         struct bnx2 *bp = netdev_priv(dev);
5894
5895         return bp->rx_csum;
5896 }
5897
5898 static int
5899 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5900 {
5901         struct bnx2 *bp = netdev_priv(dev);
5902
5903         bp->rx_csum = data;
5904         return 0;
5905 }
5906
5907 static int
5908 bnx2_set_tso(struct net_device *dev, u32 data)
5909 {
5910         struct bnx2 *bp = netdev_priv(dev);
5911
5912         if (data) {
5913                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5914                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5915                         dev->features |= NETIF_F_TSO6;
5916         } else
5917                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5918                                    NETIF_F_TSO_ECN);
5919         return 0;
5920 }
5921
#define BNX2_NUM_STATS 46

/* ethtool statistics names, reported via get_strings(ETH_SS_STATS).
 * This table must stay index-for-index in sync with
 * bnx2_stats_offset_arr and the stats-length arrays below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
5974
/* Convert a statistics_block member offset to a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset into the hardware statistics block for each counter,
 * in the same order as bnx2_stats_str_arr.  64-bit counters point at
 * their _hi word; the _lo word is read from the next index.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6025
/* Byte width of each counter in bnx2_stats_offset_arr: 8 = 64-bit,
 * 4 = 32-bit, 0 = counter skipped entirely.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};

/* Later chips only skip stat_IfHCInBadOctets (index 1). */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6044
#define BNX2_NUM_TESTS 6

/* Self-test names for get_strings(ETH_SS_TEST); the index of each entry
 * matches the buf[] slot filled in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6057
6058 static int
6059 bnx2_get_sset_count(struct net_device *dev, int sset)
6060 {
6061         switch (sset) {
6062         case ETH_SS_TEST:
6063                 return BNX2_NUM_TESTS;
6064         case ETH_SS_STATS:
6065                 return BNX2_NUM_STATS;
6066         default:
6067                 return -EOPNOTSUPP;
6068         }
6069 }
6070
/* ethtool self_test hook.  Fills buf[] (slots: 0=registers, 1=memory,
 * 2=loopback, 3=nvram, 4=interrupt, 5=link; non-zero = failed) and sets
 * ETH_TEST_FL_FAILED on any failure.  Offline tests reset the chip.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Offline tests need exclusive hardware access: quiesce
                 * the netif path, put the chip in diagnostic mode and
                 * release all posted buffers first.
                 */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* loopback returns a bitmask of failed loopback modes */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore the chip: full re-init if the interface is up,
                 * otherwise just reset it back to a quiescent state.
                 */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait for link up (poll for up to ~7 seconds) */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        /* Online tests run regardless of the offline flag. */
        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6126
6127 static void
6128 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6129 {
6130         switch (stringset) {
6131         case ETH_SS_STATS:
6132                 memcpy(buf, bnx2_stats_str_arr,
6133                         sizeof(bnx2_stats_str_arr));
6134                 break;
6135         case ETH_SS_TEST:
6136                 memcpy(buf, bnx2_tests_str_arr,
6137                         sizeof(bnx2_tests_str_arr));
6138                 break;
6139         }
6140 }
6141
6142 static void
6143 bnx2_get_ethtool_stats(struct net_device *dev,
6144                 struct ethtool_stats *stats, u64 *buf)
6145 {
6146         struct bnx2 *bp = netdev_priv(dev);
6147         int i;
6148         u32 *hw_stats = (u32 *) bp->stats_blk;
6149         u8 *stats_len_arr = NULL;
6150
6151         if (hw_stats == NULL) {
6152                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6153                 return;
6154         }
6155
6156         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6157             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6158             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6159             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6160                 stats_len_arr = bnx2_5706_stats_len_arr;
6161         else
6162                 stats_len_arr = bnx2_5708_stats_len_arr;
6163
6164         for (i = 0; i < BNX2_NUM_STATS; i++) {
6165                 if (stats_len_arr[i] == 0) {
6166                         /* skip this counter */
6167                         buf[i] = 0;
6168                         continue;
6169                 }
6170                 if (stats_len_arr[i] == 4) {
6171                         /* 4-byte counter */
6172                         buf[i] = (u64)
6173                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6174                         continue;
6175                 }
6176                 /* 8-byte counter */
6177                 buf[i] = (((u64) *(hw_stats +
6178                                         bnx2_stats_offset_arr[i])) << 32) +
6179                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6180         }
6181 }
6182
/* ethtool phys_id hook: blink the port LEDs for @data seconds (default 2)
 * so the operator can physically locate the NIC.  A pending signal aborts
 * the blink loop early.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 save;

        if (data == 0)
                data = 2;

        /* Take over LED control, remembering the old mode for restore. */
        save = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

        /* Toggle between all-off and all-on every 500 ms. */
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0) {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                }
                else {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                                BNX2_EMAC_LED_1000MB_OVERRIDE |
                                BNX2_EMAC_LED_100MB_OVERRIDE |
                                BNX2_EMAC_LED_10MB_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC);
                }
                msleep_interruptible(500);
                if (signal_pending(current))
                        break;
        }
        /* Return LED control to the hardware and restore the old mode. */
        REG_WR(bp, BNX2_EMAC_LED, 0);
        REG_WR(bp, BNX2_MISC_CFG, save);
        return 0;
}
6216
6217 static int
6218 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6219 {
6220         struct bnx2 *bp = netdev_priv(dev);
6221
6222         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6223                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6224         else
6225                 return (ethtool_op_set_tx_csum(dev, data));
6226 }
6227
/* ethtool operations table, hooked up to the netdev in bnx2_init_one(). */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
6258
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock held.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                /* Direct MDIO access is unavailable when the PHY is
                 * owned by remote (management) firmware.
                 */
                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                /* phy_lock serializes MDIO access with the link handling */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                /* PHY register writes are privileged */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
6312
6313 /* Called with rtnl_lock */
6314 static int
6315 bnx2_change_mac_addr(struct net_device *dev, void *p)
6316 {
6317         struct sockaddr *addr = p;
6318         struct bnx2 *bp = netdev_priv(dev);
6319
6320         if (!is_valid_ether_addr(addr->sa_data))
6321                 return -EINVAL;
6322
6323         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6324         if (netif_running(dev))
6325                 bnx2_set_mac_addr(bp);
6326
6327         return 0;
6328 }
6329
6330 /* Called with rtnl_lock */
6331 static int
6332 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6333 {
6334         struct bnx2 *bp = netdev_priv(dev);
6335
6336         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6337                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6338                 return -EINVAL;
6339
6340         dev->mtu = new_mtu;
6341         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6342 }
6343
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the IRQ line masked so
 * netconsole/kgdb-over-ethernet can make progress with IRQs disabled.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2_interrupt(bp->pdev->irq, dev);
        enable_irq(bp->pdev->irq);
}
#endif
6355
6356 static void __devinit
6357 bnx2_get_5709_media(struct bnx2 *bp)
6358 {
6359         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6360         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6361         u32 strap;
6362
6363         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6364                 return;
6365         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6366                 bp->phy_flags |= PHY_SERDES_FLAG;
6367                 return;
6368         }
6369
6370         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6371                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6372         else
6373                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6374
6375         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6376                 switch (strap) {
6377                 case 0x4:
6378                 case 0x5:
6379                 case 0x6:
6380                         bp->phy_flags |= PHY_SERDES_FLAG;
6381                         return;
6382                 }
6383         } else {
6384                 switch (strap) {
6385                 case 0x1:
6386                 case 0x2:
6387                 case 0x4:
6388                         bp->phy_flags |= PHY_SERDES_FLAG;
6389                         return;
6390                 }
6391         }
6392 }
6393
/* Probe the PCI/PCI-X bus mode, clock speed and width from the chip's
 * status registers, setting bp->flags (PCIX_FLAG, PCI_32BIT_FLAG) and
 * bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= PCIX_FLAG;

                /* PCI-X: the detected clock code maps to the bus speed.
                 * Intermediate codes are rounded to the nearest nominal
                 * PCI-X frequency.
                 */
                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= PCI_32BIT_FLAG;

}
6445
6446 static int __devinit
6447 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6448 {
6449         struct bnx2 *bp;
6450         unsigned long mem_len;
6451         int rc, i, j;
6452         u32 reg;
6453         u64 dma_mask, persist_dma_mask;
6454
6455         SET_NETDEV_DEV(dev, &pdev->dev);
6456         bp = netdev_priv(dev);
6457
6458         bp->flags = 0;
6459         bp->phy_flags = 0;
6460
6461         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6462         rc = pci_enable_device(pdev);
6463         if (rc) {
6464                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6465                 goto err_out;
6466         }
6467
6468         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6469                 dev_err(&pdev->dev,
6470                         "Cannot find PCI device base address, aborting.\n");
6471                 rc = -ENODEV;
6472                 goto err_out_disable;
6473         }
6474
6475         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6476         if (rc) {
6477                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6478                 goto err_out_disable;
6479         }
6480
6481         pci_set_master(pdev);
6482
6483         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6484         if (bp->pm_cap == 0) {
6485                 dev_err(&pdev->dev,
6486                         "Cannot find power management capability, aborting.\n");
6487                 rc = -EIO;
6488                 goto err_out_release;
6489         }
6490
6491         bp->dev = dev;
6492         bp->pdev = pdev;
6493
6494         spin_lock_init(&bp->phy_lock);
6495         spin_lock_init(&bp->indirect_lock);
6496         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6497
6498         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6499         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6500         dev->mem_end = dev->mem_start + mem_len;
6501         dev->irq = pdev->irq;
6502
6503         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6504
6505         if (!bp->regview) {
6506                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6507                 rc = -ENOMEM;
6508                 goto err_out_release;
6509         }
6510
6511         /* Configure byte swap and enable write to the reg_window registers.
6512          * Rely on CPU to do target byte swapping on big endian systems
6513          * The chip's target access swapping will not swap all accesses
6514          */
6515         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6516                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6517                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6518
6519         bnx2_set_power_state(bp, PCI_D0);
6520
6521         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6522
6523         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6524                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6525                         dev_err(&pdev->dev,
6526                                 "Cannot find PCIE capability, aborting.\n");
6527                         rc = -EIO;
6528                         goto err_out_unmap;
6529                 }
6530                 bp->flags |= PCIE_FLAG;
6531         } else {
6532                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6533                 if (bp->pcix_cap == 0) {
6534                         dev_err(&pdev->dev,
6535                                 "Cannot find PCIX capability, aborting.\n");
6536                         rc = -EIO;
6537                         goto err_out_unmap;
6538                 }
6539         }
6540
6541         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6542                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6543                         bp->flags |= MSI_CAP_FLAG;
6544         }
6545
6546         /* 5708 cannot support DMA addresses > 40-bit.  */
6547         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6548                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6549         else
6550                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6551
6552         /* Configure DMA attributes. */
6553         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6554                 dev->features |= NETIF_F_HIGHDMA;
6555                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6556                 if (rc) {
6557                         dev_err(&pdev->dev,
6558                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6559                         goto err_out_unmap;
6560                 }
6561         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6562                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6563                 goto err_out_unmap;
6564         }
6565
6566         if (!(bp->flags & PCIE_FLAG))
6567                 bnx2_get_pci_speed(bp);
6568
6569         /* 5706A0 may falsely detect SERR and PERR. */
6570         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6571                 reg = REG_RD(bp, PCI_COMMAND);
6572                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6573                 REG_WR(bp, PCI_COMMAND, reg);
6574         }
6575         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6576                 !(bp->flags & PCIX_FLAG)) {
6577
6578                 dev_err(&pdev->dev,
6579                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6580                 goto err_out_unmap;
6581         }
6582
6583         bnx2_init_nvram(bp);
6584
6585         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6586
6587         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6588             BNX2_SHM_HDR_SIGNATURE_SIG) {
6589                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6590
6591                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6592         } else
6593                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6594
6595         /* Get the permanent MAC address.  First we need to make sure the
6596          * firmware is actually running.
6597          */
6598         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6599
6600         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6601             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6602                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6603                 rc = -ENODEV;
6604                 goto err_out_unmap;
6605         }
6606
6607         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6608         for (i = 0, j = 0; i < 3; i++) {
6609                 u8 num, k, skip0;
6610
6611                 num = (u8) (reg >> (24 - (i * 8)));
6612                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6613                         if (num >= k || !skip0 || k == 1) {
6614                                 bp->fw_version[j++] = (num / k) + '0';
6615                                 skip0 = 0;
6616                         }
6617                 }
6618                 if (i != 2)
6619                         bp->fw_version[j++] = '.';
6620         }
6621         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6622         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6623                 bp->wol = 1;
6624
6625         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6626                 bp->flags |= ASF_ENABLE_FLAG;
6627
6628                 for (i = 0; i < 30; i++) {
6629                         reg = REG_RD_IND(bp, bp->shmem_base +
6630                                              BNX2_BC_STATE_CONDITION);
6631                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6632                                 break;
6633                         msleep(10);
6634                 }
6635         }
6636         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6637         reg &= BNX2_CONDITION_MFW_RUN_MASK;
6638         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6639             reg != BNX2_CONDITION_MFW_RUN_NONE) {
6640                 int i;
6641                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6642
6643                 bp->fw_version[j++] = ' ';
6644                 for (i = 0; i < 3; i++) {
6645                         reg = REG_RD_IND(bp, addr + i * 4);
6646                         reg = swab32(reg);
6647                         memcpy(&bp->fw_version[j], &reg, 4);
6648                         j += 4;
6649                 }
6650         }
6651
6652         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6653         bp->mac_addr[0] = (u8) (reg >> 8);
6654         bp->mac_addr[1] = (u8) reg;
6655
6656         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6657         bp->mac_addr[2] = (u8) (reg >> 24);
6658         bp->mac_addr[3] = (u8) (reg >> 16);
6659         bp->mac_addr[4] = (u8) (reg >> 8);
6660         bp->mac_addr[5] = (u8) reg;
6661
6662         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6663
6664         bp->tx_ring_size = MAX_TX_DESC_CNT;
6665         bnx2_set_rx_ring_size(bp, 255);
6666
6667         bp->rx_csum = 1;
6668
6669         bp->tx_quick_cons_trip_int = 20;
6670         bp->tx_quick_cons_trip = 20;
6671         bp->tx_ticks_int = 80;
6672         bp->tx_ticks = 80;
6673
6674         bp->rx_quick_cons_trip_int = 6;
6675         bp->rx_quick_cons_trip = 6;
6676         bp->rx_ticks_int = 18;
6677         bp->rx_ticks = 18;
6678
6679         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6680
6681         bp->timer_interval =  HZ;
6682         bp->current_interval =  HZ;
6683
6684         bp->phy_addr = 1;
6685
6686         /* Disable WOL support if we are running on a SERDES chip. */
6687         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6688                 bnx2_get_5709_media(bp);
6689         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6690                 bp->phy_flags |= PHY_SERDES_FLAG;
6691
6692         bp->phy_port = PORT_TP;
6693         if (bp->phy_flags & PHY_SERDES_FLAG) {
6694                 bp->phy_port = PORT_FIBRE;
6695                 reg = REG_RD_IND(bp, bp->shmem_base +
6696                                      BNX2_SHARED_HW_CFG_CONFIG);
6697                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6698                         bp->flags |= NO_WOL_FLAG;
6699                         bp->wol = 0;
6700                 }
6701                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6702                         bp->phy_addr = 2;
6703                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6704                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6705                 }
6706                 bnx2_init_remote_phy(bp);
6707
6708         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6709                    CHIP_NUM(bp) == CHIP_NUM_5708)
6710                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6711         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6712                  (CHIP_REV(bp) == CHIP_REV_Ax ||
6713                   CHIP_REV(bp) == CHIP_REV_Bx))
6714                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6715
6716         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6717             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6718             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6719                 bp->flags |= NO_WOL_FLAG;
6720                 bp->wol = 0;
6721         }
6722
6723         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6724                 bp->tx_quick_cons_trip_int =
6725                         bp->tx_quick_cons_trip;
6726                 bp->tx_ticks_int = bp->tx_ticks;
6727                 bp->rx_quick_cons_trip_int =
6728                         bp->rx_quick_cons_trip;
6729                 bp->rx_ticks_int = bp->rx_ticks;
6730                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6731                 bp->com_ticks_int = bp->com_ticks;
6732                 bp->cmd_ticks_int = bp->cmd_ticks;
6733         }
6734
6735         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6736          *
6737          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6738          * with byte enables disabled on the unused 32-bit word.  This is legal
6739          * but causes problems on the AMD 8132 which will eventually stop
6740          * responding after a while.
6741          *
6742          * AMD believes this incompatibility is unique to the 5706, and
6743          * prefers to locally disable MSI rather than globally disabling it.
6744          */
6745         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6746                 struct pci_dev *amd_8132 = NULL;
6747
6748                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6749                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6750                                                   amd_8132))) {
6751
6752                         if (amd_8132->revision >= 0x10 &&
6753                             amd_8132->revision <= 0x13) {
6754                                 disable_msi = 1;
6755                                 pci_dev_put(amd_8132);
6756                                 break;
6757                         }
6758                 }
6759         }
6760
6761         bnx2_set_default_link(bp);
6762         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6763
6764         init_timer(&bp->timer);
6765         bp->timer.expires = RUN_AT(bp->timer_interval);
6766         bp->timer.data = (unsigned long) bp;
6767         bp->timer.function = bnx2_timer;
6768
6769         return 0;
6770
6771 err_out_unmap:
6772         if (bp->regview) {
6773                 iounmap(bp->regview);
6774                 bp->regview = NULL;
6775         }
6776
6777 err_out_release:
6778         pci_release_regions(pdev);
6779
6780 err_out_disable:
6781         pci_disable_device(pdev);
6782         pci_set_drvdata(pdev, NULL);
6783
6784 err_out:
6785         return rc;
6786 }
6787
6788 static char * __devinit
6789 bnx2_bus_string(struct bnx2 *bp, char *str)
6790 {
6791         char *s = str;
6792
6793         if (bp->flags & PCIE_FLAG) {
6794                 s += sprintf(s, "PCI Express");
6795         } else {
6796                 s += sprintf(s, "PCI");
6797                 if (bp->flags & PCIX_FLAG)
6798                         s += sprintf(s, "-X");
6799                 if (bp->flags & PCI_32BIT_FLAG)
6800                         s += sprintf(s, " 32-bit");
6801                 else
6802                         s += sprintf(s, " 64-bit");
6803                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6804         }
6805         return str;
6806 }
6807
/* PCI probe entry point: allocate the net_device, initialize the board
 * (bnx2_init_board), wire up the netdev operations and feature flags,
 * and register with the networking core.  Returns 0 on success or a
 * negative errno; on failure all acquired resources are released.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev = NULL;
        struct bnx2 *bp;
        int rc;
        char str[40];
        DECLARE_MAC_BUF(mac);

        /* Print the driver banner only on the first probed device. */
        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev(sizeof(*bp));

        if (!dev)
                return -ENOMEM;

        /* Maps BARs, reads NVRAM config, sets up bp; on failure it has
         * already cleaned up after itself, so only the netdev is freed here.
         */
        rc = bnx2_init_board(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        dev->open = bnx2_open;
        dev->hard_start_xmit = bnx2_start_xmit;
        dev->stop = bnx2_close;
        dev->get_stats = bnx2_get_stats;
        dev->set_multicast_list = bnx2_set_rx_mode;
        dev->do_ioctl = bnx2_ioctl;
        dev->set_mac_address = bnx2_change_mac_addr;
        dev->change_mtu = bnx2_change_mtu;
        dev->tx_timeout = bnx2_tx_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
        dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
        dev->ethtool_ops = &bnx2_ethtool_ops;

        bp = netdev_priv(dev);
        netif_napi_add(dev, &bp->napi, bnx2_poll, 64);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        dev->poll_controller = poll_bnx2;
#endif

        pci_set_drvdata(pdev, dev);

        /* MAC address was read from NVRAM by bnx2_init_board. */
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);
        bp->name = board_info[ent->driver_data].name;

        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
        /* Only the 5709 supports IPv6 checksum offload and TSO6. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                dev->features |= NETIF_F_TSO6;

        if ((rc = register_netdev(dev))) {
                /* Manually undo everything bnx2_init_board set up. */
                dev_err(&pdev->dev, "Cannot register net device\n");
                if (bp->regview)
                        iounmap(bp->regview);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
                free_netdev(dev);
                return rc;
        }

        /* CHIP_ID encodes the silicon revision, e.g. 0xA2 -> "A2". */
        printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
                "IRQ %d, node addr %s\n",
                dev->name,
                bp->name,
                ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                ((CHIP_ID(bp) & 0x0ff0) >> 4),
                bnx2_bus_string(bp, str),
                dev->base_addr,
                bp->pdev->irq, print_mac(mac, dev->dev_addr));

        return 0;
}
6895
6896 static void __devexit
6897 bnx2_remove_one(struct pci_dev *pdev)
6898 {
6899         struct net_device *dev = pci_get_drvdata(pdev);
6900         struct bnx2 *bp = netdev_priv(dev);
6901
6902         flush_scheduled_work();
6903
6904         unregister_netdev(dev);
6905
6906         if (bp->regview)
6907                 iounmap(bp->regview);
6908
6909         free_netdev(dev);
6910         pci_release_regions(pdev);
6911         pci_disable_device(pdev);
6912         pci_set_drvdata(pdev, NULL);
6913 }
6914
6915 static int
6916 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6917 {
6918         struct net_device *dev = pci_get_drvdata(pdev);
6919         struct bnx2 *bp = netdev_priv(dev);
6920         u32 reset_code;
6921
6922         /* PCI register 4 needs to be saved whether netif_running() or not.
6923          * MSI address and data need to be saved if using MSI and
6924          * netif_running().
6925          */
6926         pci_save_state(pdev);
6927         if (!netif_running(dev))
6928                 return 0;
6929
6930         flush_scheduled_work();
6931         bnx2_netif_stop(bp);
6932         netif_device_detach(dev);
6933         del_timer_sync(&bp->timer);
6934         if (bp->flags & NO_WOL_FLAG)
6935                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6936         else if (bp->wol)
6937                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6938         else
6939                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6940         bnx2_reset_chip(bp, reset_code);
6941         bnx2_free_skbs(bp);
6942         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6943         return 0;
6944 }
6945
6946 static int
6947 bnx2_resume(struct pci_dev *pdev)
6948 {
6949         struct net_device *dev = pci_get_drvdata(pdev);
6950         struct bnx2 *bp = netdev_priv(dev);
6951
6952         pci_restore_state(pdev);
6953         if (!netif_running(dev))
6954                 return 0;
6955
6956         bnx2_set_power_state(bp, PCI_D0);
6957         netif_device_attach(dev);
6958         bnx2_init_nic(bp);
6959         bnx2_netif_start(bp);
6960         return 0;
6961 }
6962
/* PCI driver glue: device-ID table plus probe/remove and the legacy
 * (pre-dev_pm_ops) suspend/resume entry points. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
};
6971
6972 static int __init bnx2_init(void)
6973 {
6974         return pci_register_driver(&bnx2_pci_driver);
6975 }
6976
/* Module unload: unregistering the driver triggers bnx2_remove_one()
 * for every bound device. */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
6981
/* Register the module entry/exit points with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6984
6985
6986