/* Source: drivers/net/bnx2.c (linux-2.6) —
 * "bnx2: Put rx ring variables in a separate struct."
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x10000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.7.6"
60 #define DRV_MODULE_RELDATE      "May 16, 2008"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80 typedef enum {
81         BCM5706 = 0,
82         NC370T,
83         NC370I,
84         BCM5706S,
85         NC370F,
86         BCM5708,
87         BCM5708S,
88         BCM5709,
89         BCM5709S,
90 } board_t;
91
92 /* indexed by board_t, above */
93 static struct {
94         char *name;
95 } board_info[] __devinitdata = {
96         { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97         { "HP NC370T Multifunction Gigabit Server Adapter" },
98         { "HP NC370i Multifunction Gigabit Server Adapter" },
99         { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100         { "HP NC370F Multifunction Gigabit Server Adapter" },
101         { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102         { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103         { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104         { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105         };
106
107 static struct pci_device_id bnx2_pci_tbl[] = {
108         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109           PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111           PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117           PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125           PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
126         { 0, }
127 };
128
129 static struct flash_spec flash_table[] =
130 {
131 #define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
133         /* Slow EEPROM */
134         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137          "EEPROM - slow"},
138         /* Expansion entry 0001 */
139         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142          "Entry 0001"},
143         /* Saifun SA25F010 (non-buffered flash) */
144         /* strap, cfg1, & write1 need updates */
145         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148          "Non-buffered flash (128kB)"},
149         /* Saifun SA25F020 (non-buffered flash) */
150         /* strap, cfg1, & write1 need updates */
151         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154          "Non-buffered flash (256kB)"},
155         /* Expansion entry 0100 */
156         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159          "Entry 0100"},
160         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164          "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169          "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170         /* Saifun SA25F005 (non-buffered flash) */
171         /* strap, cfg1, & write1 need updates */
172         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175          "Non-buffered flash (64kB)"},
176         /* Fast EEPROM */
177         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180          "EEPROM - fast"},
181         /* Expansion entry 1001 */
182         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185          "Entry 1001"},
186         /* Expansion entry 1010 */
187         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190          "Entry 1010"},
191         /* ATMEL AT45DB011B (buffered flash) */
192         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195          "Buffered flash (128kB)"},
196         /* Expansion entry 1100 */
197         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200          "Entry 1100"},
201         /* Expansion entry 1101 */
202         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205          "Entry 1101"},
206         /* Ateml Expansion entry 1110 */
207         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210          "Entry 1110 (Atmel)"},
211         /* ATMEL AT45DB021B (buffered flash) */
212         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215          "Buffered flash (256kB)"},
216 };
217
218 static struct flash_spec flash_5709 = {
219         .flags          = BNX2_NV_BUFFERED,
220         .page_bits      = BCM5709_FLASH_PAGE_BITS,
221         .page_size      = BCM5709_FLASH_PAGE_SIZE,
222         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
223         .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
224         .name           = "5709 Buffered flash (256kB)",
225 };
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = txr->tx_prod - txr->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
259 static void
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261 {
262         spin_lock_bh(&bp->indirect_lock);
263         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265         spin_unlock_bh(&bp->indirect_lock);
266 }
267
268 static void
269 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
270 {
271         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
272 }
273
274 static u32
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
276 {
277         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
278 }
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
305 static int
306 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
307 {
308         u32 val1;
309         int i, ret;
310
311         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
312                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
314
315                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317
318                 udelay(40);
319         }
320
321         val1 = (bp->phy_addr << 21) | (reg << 16) |
322                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
323                 BNX2_EMAC_MDIO_COMM_START_BUSY;
324         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
325
326         for (i = 0; i < 50; i++) {
327                 udelay(10);
328
329                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
330                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
331                         udelay(5);
332
333                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
334                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
335
336                         break;
337                 }
338         }
339
340         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
341                 *val = 0x0;
342                 ret = -EBUSY;
343         }
344         else {
345                 *val = val1;
346                 ret = 0;
347         }
348
349         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
350                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
352
353                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
354                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
355
356                 udelay(40);
357         }
358
359         return ret;
360 }
361
362 static int
363 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
364 {
365         u32 val1;
366         int i, ret;
367
368         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
369                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
370                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
371
372                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
373                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
374
375                 udelay(40);
376         }
377
378         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
379                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
380                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
381         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
382
383         for (i = 0; i < 50; i++) {
384                 udelay(10);
385
386                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
387                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
388                         udelay(5);
389                         break;
390                 }
391         }
392
393         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
394                 ret = -EBUSY;
395         else
396                 ret = 0;
397
398         if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
399                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
401
402                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
403                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
404
405                 udelay(40);
406         }
407
408         return ret;
409 }
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
425 static void
426 bnx2_enable_int(struct bnx2 *bp)
427 {
428         int i;
429         struct bnx2_napi *bnapi;
430
431         for (i = 0; i < bp->irq_nvecs; i++) {
432                 bnapi = &bp->bnx2_napi[i];
433
434                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
435                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
436                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
437                        bnapi->last_status_idx);
438
439                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441                        bnapi->last_status_idx);
442         }
443         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
444 }
445
446 static void
447 bnx2_disable_int_sync(struct bnx2 *bp)
448 {
449         int i;
450
451         atomic_inc(&bp->intr_sem);
452         bnx2_disable_int(bp);
453         for (i = 0; i < bp->irq_nvecs; i++)
454                 synchronize_irq(bp->irq_tbl[i].vector);
455 }
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
475 static void
476 bnx2_netif_stop(struct bnx2 *bp)
477 {
478         bnx2_disable_int_sync(bp);
479         if (netif_running(bp->dev)) {
480                 bnx2_napi_disable(bp);
481                 netif_tx_disable(bp->dev);
482                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
483         }
484 }
485
486 static void
487 bnx2_netif_start(struct bnx2 *bp)
488 {
489         if (atomic_dec_and_test(&bp->intr_sem)) {
490                 if (netif_running(bp->dev)) {
491                         netif_wake_queue(bp->dev);
492                         bnx2_napi_enable(bp);
493                         bnx2_enable_int(bp);
494                 }
495         }
496 }
497
498 static void
499 bnx2_free_tx_mem(struct bnx2 *bp)
500 {
501         int i;
502
503         for (i = 0; i < bp->num_tx_rings; i++) {
504                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
505                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
506
507                 if (txr->tx_desc_ring) {
508                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
509                                             txr->tx_desc_ring,
510                                             txr->tx_desc_mapping);
511                         txr->tx_desc_ring = NULL;
512                 }
513                 kfree(txr->tx_buf_ring);
514                 txr->tx_buf_ring = NULL;
515         }
516 }
517
518 static void
519 bnx2_free_rx_mem(struct bnx2 *bp)
520 {
521         int i;
522
523         for (i = 0; i < bp->num_rx_rings; i++) {
524                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
525                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
526                 int j;
527
528                 for (j = 0; j < bp->rx_max_ring; j++) {
529                         if (rxr->rx_desc_ring[j])
530                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
531                                                     rxr->rx_desc_ring[j],
532                                                     rxr->rx_desc_mapping[j]);
533                         rxr->rx_desc_ring[j] = NULL;
534                 }
535                 if (rxr->rx_buf_ring)
536                         vfree(rxr->rx_buf_ring);
537                 rxr->rx_buf_ring = NULL;
538
539                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
540                         if (rxr->rx_pg_desc_ring[j])
541                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
542                                                     rxr->rx_pg_desc_ring[i],
543                                                     rxr->rx_pg_desc_mapping[i]);
544                         rxr->rx_pg_desc_ring[i] = NULL;
545                 }
546                 if (rxr->rx_pg_ring)
547                         vfree(rxr->rx_pg_ring);
548                 rxr->rx_pg_ring = NULL;
549         }
550 }
551
552 static int
553 bnx2_alloc_tx_mem(struct bnx2 *bp)
554 {
555         int i;
556
557         for (i = 0; i < bp->num_tx_rings; i++) {
558                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
559                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
560
561                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
562                 if (txr->tx_buf_ring == NULL)
563                         return -ENOMEM;
564
565                 txr->tx_desc_ring =
566                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
567                                              &txr->tx_desc_mapping);
568                 if (txr->tx_desc_ring == NULL)
569                         return -ENOMEM;
570         }
571         return 0;
572 }
573
574 static int
575 bnx2_alloc_rx_mem(struct bnx2 *bp)
576 {
577         int i;
578
579         for (i = 0; i < bp->num_rx_rings; i++) {
580                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
581                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
582                 int j;
583
584                 rxr->rx_buf_ring =
585                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
586                 if (rxr->rx_buf_ring == NULL)
587                         return -ENOMEM;
588
589                 memset(rxr->rx_buf_ring, 0,
590                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
591
592                 for (j = 0; j < bp->rx_max_ring; j++) {
593                         rxr->rx_desc_ring[j] =
594                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
595                                                      &rxr->rx_desc_mapping[j]);
596                         if (rxr->rx_desc_ring[j] == NULL)
597                                 return -ENOMEM;
598
599                 }
600
601                 if (bp->rx_pg_ring_size) {
602                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
603                                                   bp->rx_max_pg_ring);
604                         if (rxr->rx_pg_ring == NULL)
605                                 return -ENOMEM;
606
607                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
608                                bp->rx_max_pg_ring);
609                 }
610
611                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
612                         rxr->rx_pg_desc_ring[j] =
613                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
614                                                 &rxr->rx_pg_desc_mapping[j]);
615                         if (rxr->rx_pg_desc_ring[j] == NULL)
616                                 return -ENOMEM;
617
618                 }
619         }
620         return 0;
621 }
622
623 static void
624 bnx2_free_mem(struct bnx2 *bp)
625 {
626         int i;
627
628         bnx2_free_tx_mem(bp);
629         bnx2_free_rx_mem(bp);
630
631         for (i = 0; i < bp->ctx_pages; i++) {
632                 if (bp->ctx_blk[i]) {
633                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
634                                             bp->ctx_blk[i],
635                                             bp->ctx_blk_mapping[i]);
636                         bp->ctx_blk[i] = NULL;
637                 }
638         }
639         if (bp->status_blk) {
640                 pci_free_consistent(bp->pdev, bp->status_stats_size,
641                                     bp->status_blk, bp->status_blk_mapping);
642                 bp->status_blk = NULL;
643                 bp->stats_blk = NULL;
644         }
645 }
646
647 static int
648 bnx2_alloc_mem(struct bnx2 *bp)
649 {
650         int i, status_blk_size, err;
651
652         /* Combine status and statistics blocks into one allocation. */
653         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
654         if (bp->flags & BNX2_FLAG_MSIX_CAP)
655                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
656                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
657         bp->status_stats_size = status_blk_size +
658                                 sizeof(struct statistics_block);
659
660         bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
661                                               &bp->status_blk_mapping);
662         if (bp->status_blk == NULL)
663                 goto alloc_mem_err;
664
665         memset(bp->status_blk, 0, bp->status_stats_size);
666
667         bp->bnx2_napi[0].status_blk = bp->status_blk;
668         if (bp->flags & BNX2_FLAG_MSIX_CAP) {
669                 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
670                         struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
671
672                         bnapi->status_blk_msix = (void *)
673                                 ((unsigned long) bp->status_blk +
674                                  BNX2_SBLK_MSIX_ALIGN_SIZE * i);
675                         bnapi->int_num = i << 24;
676                 }
677         }
678
679         bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
680                                   status_blk_size);
681
682         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
683
684         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
685                 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
686                 if (bp->ctx_pages == 0)
687                         bp->ctx_pages = 1;
688                 for (i = 0; i < bp->ctx_pages; i++) {
689                         bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
690                                                 BCM_PAGE_SIZE,
691                                                 &bp->ctx_blk_mapping[i]);
692                         if (bp->ctx_blk[i] == NULL)
693                                 goto alloc_mem_err;
694                 }
695         }
696
697         err = bnx2_alloc_rx_mem(bp);
698         if (err)
699                 goto alloc_mem_err;
700
701         err = bnx2_alloc_tx_mem(bp);
702         if (err)
703                 goto alloc_mem_err;
704
705         return 0;
706
707 alloc_mem_err:
708         bnx2_free_mem(bp);
709         return -ENOMEM;
710 }
711
712 static void
713 bnx2_report_fw_link(struct bnx2 *bp)
714 {
715         u32 fw_link_status = 0;
716
717         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
718                 return;
719
720         if (bp->link_up) {
721                 u32 bmsr;
722
723                 switch (bp->line_speed) {
724                 case SPEED_10:
725                         if (bp->duplex == DUPLEX_HALF)
726                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
727                         else
728                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
729                         break;
730                 case SPEED_100:
731                         if (bp->duplex == DUPLEX_HALF)
732                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
733                         else
734                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
735                         break;
736                 case SPEED_1000:
737                         if (bp->duplex == DUPLEX_HALF)
738                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
739                         else
740                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
741                         break;
742                 case SPEED_2500:
743                         if (bp->duplex == DUPLEX_HALF)
744                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
745                         else
746                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
747                         break;
748                 }
749
750                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
751
752                 if (bp->autoneg) {
753                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
754
755                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
756                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
757
758                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
759                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
760                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
761                         else
762                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
763                 }
764         }
765         else
766                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
767
768         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
769 }
770
771 static char *
772 bnx2_xceiver_str(struct bnx2 *bp)
773 {
774         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
775                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
776                  "Copper"));
777 }
778
779 static void
780 bnx2_report_link(struct bnx2 *bp)
781 {
782         if (bp->link_up) {
783                 netif_carrier_on(bp->dev);
784                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
785                        bnx2_xceiver_str(bp));
786
787                 printk("%d Mbps ", bp->line_speed);
788
789                 if (bp->duplex == DUPLEX_FULL)
790                         printk("full duplex");
791                 else
792                         printk("half duplex");
793
794                 if (bp->flow_ctrl) {
795                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
796                                 printk(", receive ");
797                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
798                                         printk("& transmit ");
799                         }
800                         else {
801                                 printk(", transmit ");
802                         }
803                         printk("flow control ON");
804                 }
805                 printk("\n");
806         }
807         else {
808                 netif_carrier_off(bp->dev);
809                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
810                        bnx2_xceiver_str(bp));
811         }
812
813         bnx2_report_fw_link(bp);
814 }
815
816 static void
817 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
818 {
819         u32 local_adv, remote_adv;
820
821         bp->flow_ctrl = 0;
822         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
823                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
824
825                 if (bp->duplex == DUPLEX_FULL) {
826                         bp->flow_ctrl = bp->req_flow_ctrl;
827                 }
828                 return;
829         }
830
831         if (bp->duplex != DUPLEX_FULL) {
832                 return;
833         }
834
835         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
836             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
837                 u32 val;
838
839                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
840                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
841                         bp->flow_ctrl |= FLOW_CTRL_TX;
842                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
843                         bp->flow_ctrl |= FLOW_CTRL_RX;
844                 return;
845         }
846
847         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
848         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
849
850         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
851                 u32 new_local_adv = 0;
852                 u32 new_remote_adv = 0;
853
854                 if (local_adv & ADVERTISE_1000XPAUSE)
855                         new_local_adv |= ADVERTISE_PAUSE_CAP;
856                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
857                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
858                 if (remote_adv & ADVERTISE_1000XPAUSE)
859                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
860                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
861                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
862
863                 local_adv = new_local_adv;
864                 remote_adv = new_remote_adv;
865         }
866
867         /* See Table 28B-3 of 802.3ab-1999 spec. */
868         if (local_adv & ADVERTISE_PAUSE_CAP) {
869                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
870                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
871                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
872                         }
873                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
874                                 bp->flow_ctrl = FLOW_CTRL_RX;
875                         }
876                 }
877                 else {
878                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
879                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
880                         }
881                 }
882         }
883         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
884                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
885                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
886
887                         bp->flow_ctrl = FLOW_CTRL_TX;
888                 }
889         }
890 }
891
892 static int
893 bnx2_5709s_linkup(struct bnx2 *bp)
894 {
895         u32 val, speed;
896
897         bp->link_up = 1;
898
899         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
900         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
901         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
902
903         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
904                 bp->line_speed = bp->req_line_speed;
905                 bp->duplex = bp->req_duplex;
906                 return 0;
907         }
908         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
909         switch (speed) {
910                 case MII_BNX2_GP_TOP_AN_SPEED_10:
911                         bp->line_speed = SPEED_10;
912                         break;
913                 case MII_BNX2_GP_TOP_AN_SPEED_100:
914                         bp->line_speed = SPEED_100;
915                         break;
916                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
917                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
918                         bp->line_speed = SPEED_1000;
919                         break;
920                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
921                         bp->line_speed = SPEED_2500;
922                         break;
923         }
924         if (val & MII_BNX2_GP_TOP_AN_FD)
925                 bp->duplex = DUPLEX_FULL;
926         else
927                 bp->duplex = DUPLEX_HALF;
928         return 0;
929 }
930
931 static int
932 bnx2_5708s_linkup(struct bnx2 *bp)
933 {
934         u32 val;
935
936         bp->link_up = 1;
937         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
938         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
939                 case BCM5708S_1000X_STAT1_SPEED_10:
940                         bp->line_speed = SPEED_10;
941                         break;
942                 case BCM5708S_1000X_STAT1_SPEED_100:
943                         bp->line_speed = SPEED_100;
944                         break;
945                 case BCM5708S_1000X_STAT1_SPEED_1G:
946                         bp->line_speed = SPEED_1000;
947                         break;
948                 case BCM5708S_1000X_STAT1_SPEED_2G5:
949                         bp->line_speed = SPEED_2500;
950                         break;
951         }
952         if (val & BCM5708S_1000X_STAT1_FD)
953                 bp->duplex = DUPLEX_FULL;
954         else
955                 bp->duplex = DUPLEX_HALF;
956
957         return 0;
958 }
959
960 static int
961 bnx2_5706s_linkup(struct bnx2 *bp)
962 {
963         u32 bmcr, local_adv, remote_adv, common;
964
965         bp->link_up = 1;
966         bp->line_speed = SPEED_1000;
967
968         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
969         if (bmcr & BMCR_FULLDPLX) {
970                 bp->duplex = DUPLEX_FULL;
971         }
972         else {
973                 bp->duplex = DUPLEX_HALF;
974         }
975
976         if (!(bmcr & BMCR_ANENABLE)) {
977                 return 0;
978         }
979
980         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
981         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
982
983         common = local_adv & remote_adv;
984         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
985
986                 if (common & ADVERTISE_1000XFULL) {
987                         bp->duplex = DUPLEX_FULL;
988                 }
989                 else {
990                         bp->duplex = DUPLEX_HALF;
991                 }
992         }
993
994         return 0;
995 }
996
997 static int
998 bnx2_copper_linkup(struct bnx2 *bp)
999 {
1000         u32 bmcr;
1001
1002         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1003         if (bmcr & BMCR_ANENABLE) {
1004                 u32 local_adv, remote_adv, common;
1005
1006                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1007                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1008
1009                 common = local_adv & (remote_adv >> 2);
1010                 if (common & ADVERTISE_1000FULL) {
1011                         bp->line_speed = SPEED_1000;
1012                         bp->duplex = DUPLEX_FULL;
1013                 }
1014                 else if (common & ADVERTISE_1000HALF) {
1015                         bp->line_speed = SPEED_1000;
1016                         bp->duplex = DUPLEX_HALF;
1017                 }
1018                 else {
1019                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1020                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1021
1022                         common = local_adv & remote_adv;
1023                         if (common & ADVERTISE_100FULL) {
1024                                 bp->line_speed = SPEED_100;
1025                                 bp->duplex = DUPLEX_FULL;
1026                         }
1027                         else if (common & ADVERTISE_100HALF) {
1028                                 bp->line_speed = SPEED_100;
1029                                 bp->duplex = DUPLEX_HALF;
1030                         }
1031                         else if (common & ADVERTISE_10FULL) {
1032                                 bp->line_speed = SPEED_10;
1033                                 bp->duplex = DUPLEX_FULL;
1034                         }
1035                         else if (common & ADVERTISE_10HALF) {
1036                                 bp->line_speed = SPEED_10;
1037                                 bp->duplex = DUPLEX_HALF;
1038                         }
1039                         else {
1040                                 bp->line_speed = 0;
1041                                 bp->link_up = 0;
1042                         }
1043                 }
1044         }
1045         else {
1046                 if (bmcr & BMCR_SPEED100) {
1047                         bp->line_speed = SPEED_100;
1048                 }
1049                 else {
1050                         bp->line_speed = SPEED_10;
1051                 }
1052                 if (bmcr & BMCR_FULLDPLX) {
1053                         bp->duplex = DUPLEX_FULL;
1054                 }
1055                 else {
1056                         bp->duplex = DUPLEX_HALF;
1057                 }
1058         }
1059
1060         return 0;
1061 }
1062
/* Program the L2 context type and (on the 5709) the flow-control
 * watermarks for the rx ring context identified by cid.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* NOTE(review): magic field at bits 9:8 — confirm meaning against chip docs */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Only use a low watermark when tx flow control (pause
		 * frames) is enabled; otherwise disable it.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* A low watermark at or above the ring size is useless. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Watermarks must not cross; disable the low one if so. */
		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* The high watermark is a 4-bit field; clamp it, and
		 * disable the low watermark if the high one scaled to 0.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1098
1099 static void
1100 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1101 {
1102         int i;
1103         u32 cid;
1104
1105         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1106                 if (i == 1)
1107                         cid = RX_RSS_CID;
1108                 bnx2_init_rx_context(bp, cid);
1109         }
1110 }
1111
/* Program the EMAC to match the current link state (bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl).  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620 / 0x26ff appear to be TX length/IPG
	 * settings, with the larger value used for 1G half duplex —
	 * confirm against the register documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips after the 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* The 5709 rx-context watermarks depend on bp->flow_ctrl (see
	 * bnx2_init_rx_context), so refresh them now that it is final.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);

	return 0;
}
1181
1182 static void
1183 bnx2_enable_bmsr1(struct bnx2 *bp)
1184 {
1185         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1186             (CHIP_NUM(bp) == CHIP_NUM_5709))
1187                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1188                                MII_BNX2_BLK_ADDR_GP_STATUS);
1189 }
1190
1191 static void
1192 bnx2_disable_bmsr1(struct bnx2 *bp)
1193 {
1194         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1195             (CHIP_NUM(bp) == CHIP_NUM_5709))
1196                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1197                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1198 }
1199
1200 static int
1201 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1202 {
1203         u32 up1;
1204         int ret = 1;
1205
1206         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1207                 return 0;
1208
1209         if (bp->autoneg & AUTONEG_SPEED)
1210                 bp->advertising |= ADVERTISED_2500baseX_Full;
1211
1212         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1213                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1214
1215         bnx2_read_phy(bp, bp->mii_up1, &up1);
1216         if (!(up1 & BCM5708S_UP1_2G5)) {
1217                 up1 |= BCM5708S_UP1_2G5;
1218                 bnx2_write_phy(bp, bp->mii_up1, up1);
1219                 ret = 0;
1220         }
1221
1222         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1223                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1224                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1225
1226         return ret;
1227 }
1228
1229 static int
1230 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1231 {
1232         u32 up1;
1233         int ret = 0;
1234
1235         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1236                 return 0;
1237
1238         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1239                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1240
1241         bnx2_read_phy(bp, bp->mii_up1, &up1);
1242         if (up1 & BCM5708S_UP1_2G5) {
1243                 up1 &= ~BCM5708S_UP1_2G5;
1244                 bnx2_write_phy(bp, bp->mii_up1, up1);
1245                 ret = 1;
1246         }
1247
1248         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1249                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1250                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1251
1252         return ret;
1253 }
1254
1255 static void
1256 bnx2_enable_forced_2g5(struct bnx2 *bp)
1257 {
1258         u32 bmcr;
1259
1260         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1261                 return;
1262
1263         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1264                 u32 val;
1265
1266                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1267                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1268                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1269                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1270                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1271                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1272
1273                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1274                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1275                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1276
1277         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1278                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1279                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1280         }
1281
1282         if (bp->autoneg & AUTONEG_SPEED) {
1283                 bmcr &= ~BMCR_ANENABLE;
1284                 if (bp->req_duplex == DUPLEX_FULL)
1285                         bmcr |= BMCR_FULLDPLX;
1286         }
1287         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1288 }
1289
1290 static void
1291 bnx2_disable_forced_2g5(struct bnx2 *bp)
1292 {
1293         u32 bmcr;
1294
1295         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1296                 return;
1297
1298         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1299                 u32 val;
1300
1301                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1302                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1303                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1304                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1305                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1306
1307                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1308                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1309                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1310
1311         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1312                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1313                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1314         }
1315
1316         if (bp->autoneg & AUTONEG_SPEED)
1317                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1318         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1319 }
1320
1321 static void
1322 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1323 {
1324         u32 val;
1325
1326         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1327         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1328         if (start)
1329                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1330         else
1331                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1332 }
1333
/* Re-evaluate the PHY link state and reprogram the MAC accordingly.
 * Called with bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware manages the link on remote-PHY capable devices. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: MII link status is latched-low, so the
	 * first read clears a stale latch and the second reflects the
	 * current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Undo a previous forced link-down before sampling. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is read twice as well to clear latched state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Override the BMSR bit: on the 5706 SerDes trust the
		 * EMAC link bit plus SerDes sync status instead.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Re-enable autoneg if we had been in parallel-detect
		 * mode while the link was up.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Report only actual transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1417
1418 static int
1419 bnx2_reset_phy(struct bnx2 *bp)
1420 {
1421         int i;
1422         u32 reg;
1423
1424         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1425
1426 #define PHY_RESET_MAX_WAIT 100
1427         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1428                 udelay(10);
1429
1430                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1431                 if (!(reg & BMCR_RESET)) {
1432                         udelay(20);
1433                         break;
1434                 }
1435         }
1436         if (i == PHY_RESET_MAX_WAIT) {
1437                 return -EBUSY;
1438         }
1439         return 0;
1440 }
1441
1442 static u32
1443 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1444 {
1445         u32 adv = 0;
1446
1447         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1448                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1449
1450                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1451                         adv = ADVERTISE_1000XPAUSE;
1452                 }
1453                 else {
1454                         adv = ADVERTISE_PAUSE_CAP;
1455                 }
1456         }
1457         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1458                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1459                         adv = ADVERTISE_1000XPSE_ASYM;
1460                 }
1461                 else {
1462                         adv = ADVERTISE_PAUSE_ASYM;
1463                 }
1464         }
1465         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1466                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1467                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1468                 }
1469                 else {
1470                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1471                 }
1472         }
1473         return adv;
1474 }
1475
1476 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1477
1478 static int
1479 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1480 {
1481         u32 speed_arg = 0, pause_adv;
1482
1483         pause_adv = bnx2_phy_get_pause_adv(bp);
1484
1485         if (bp->autoneg & AUTONEG_SPEED) {
1486                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1487                 if (bp->advertising & ADVERTISED_10baseT_Half)
1488                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1489                 if (bp->advertising & ADVERTISED_10baseT_Full)
1490                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1491                 if (bp->advertising & ADVERTISED_100baseT_Half)
1492                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1493                 if (bp->advertising & ADVERTISED_100baseT_Full)
1494                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1495                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1496                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1497                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1498                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1499         } else {
1500                 if (bp->req_line_speed == SPEED_2500)
1501                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1502                 else if (bp->req_line_speed == SPEED_1000)
1503                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1504                 else if (bp->req_line_speed == SPEED_100) {
1505                         if (bp->req_duplex == DUPLEX_FULL)
1506                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1507                         else
1508                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1509                 } else if (bp->req_line_speed == SPEED_10) {
1510                         if (bp->req_duplex == DUPLEX_FULL)
1511                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1512                         else
1513                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1514                 }
1515         }
1516
1517         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1518                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1519         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1520                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1521
1522         if (port == PORT_TP)
1523                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1524                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1525
1526         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1527
1528         spin_unlock_bh(&bp->phy_lock);
1529         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1530         spin_lock_bh(&bp->phy_lock);
1531
1532         return 0;
1533 }
1534
/* Configure the SerDes PHY according to bp's requested autoneg/speed/
 * duplex settings.  Called with bp->phy_lock held; may drop and
 * re-acquire it.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware owns the PHY in remote-PHY mode. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* A change in the 2.5G advertisement requires bouncing
		 * the link so the partner re-negotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;	/* NOTE(review): magic BMCR bit cleared here — confirm meaning */
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Briefly advertise nothing and restart
				 * autoneg so the partner sees the drop.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve the MAC state. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the phy_lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1649
/* Advertisement masks used by the ethtool get/set-settings paths.
 * NOTE: ETHTOOL_ALL_FIBRE_SPEED expands the identifier `bp`, so it
 * must be used where a `struct bnx2 *bp` is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII register advertisement masks (ADVERTISE_* bit encoding). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1664
1665 static void
1666 bnx2_set_default_remote_link(struct bnx2 *bp)
1667 {
1668         u32 link;
1669
1670         if (bp->phy_port == PORT_TP)
1671                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1672         else
1673                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1674
1675         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1676                 bp->req_line_speed = 0;
1677                 bp->autoneg |= AUTONEG_SPEED;
1678                 bp->advertising = ADVERTISED_Autoneg;
1679                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1680                         bp->advertising |= ADVERTISED_10baseT_Half;
1681                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1682                         bp->advertising |= ADVERTISED_10baseT_Full;
1683                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1684                         bp->advertising |= ADVERTISED_100baseT_Half;
1685                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1686                         bp->advertising |= ADVERTISED_100baseT_Full;
1687                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1688                         bp->advertising |= ADVERTISED_1000baseT_Full;
1689                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1690                         bp->advertising |= ADVERTISED_2500baseX_Full;
1691         } else {
1692                 bp->autoneg = 0;
1693                 bp->advertising = 0;
1694                 bp->req_duplex = DUPLEX_FULL;
1695                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1696                         bp->req_line_speed = SPEED_10;
1697                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1698                                 bp->req_duplex = DUPLEX_HALF;
1699                 }
1700                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1701                         bp->req_line_speed = SPEED_100;
1702                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1703                                 bp->req_duplex = DUPLEX_HALF;
1704                 }
1705                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1706                         bp->req_line_speed = SPEED_1000;
1707                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1708                         bp->req_line_speed = SPEED_2500;
1709         }
1710 }
1711
1712 static void
1713 bnx2_set_default_link(struct bnx2 *bp)
1714 {
1715         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1716                 bnx2_set_default_remote_link(bp);
1717                 return;
1718         }
1719
1720         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1721         bp->req_line_speed = 0;
1722         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1723                 u32 reg;
1724
1725                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1726
1727                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1728                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1729                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1730                         bp->autoneg = 0;
1731                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1732                         bp->req_duplex = DUPLEX_FULL;
1733                 }
1734         } else
1735                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1736 }
1737
1738 static void
1739 bnx2_send_heart_beat(struct bnx2 *bp)
1740 {
1741         u32 msg;
1742         u32 addr;
1743
1744         spin_lock(&bp->indirect_lock);
1745         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1746         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1747         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1748         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1749         spin_unlock(&bp->indirect_lock);
1750 }
1751
/* Decode a link event reported by the remote PHY firmware through the
 * BNX2_LINK_STATUS shared-memory word: link state, speed, duplex, flow
 * control and port type.  Reprograms the MAC and reports the link if
 * its state changed.
 * NOTE(review): appears to run under bp->phy_lock (it writes bp->link_up
 * and friends) — confirm at the call sites, which are outside this view.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember old state for the report */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware piggybacks its heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The xxHALF cases set half duplex and then fall through
		 * to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: honor the forced setting unless both
		 * speed and flow-control autoneg are enabled, in which
		 * case take the negotiated result from the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change (TP <-> fibre) invalidates the
		 * current default link settings; recompute them.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1828
1829 static int
1830 bnx2_set_remote_link(struct bnx2 *bp)
1831 {
1832         u32 evt_code;
1833
1834         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1835         switch (evt_code) {
1836                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1837                         bnx2_remote_phy_event(bp);
1838                         break;
1839                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1840                 default:
1841                         bnx2_send_heart_beat(bp);
1842                         break;
1843         }
1844         return 0;
1845 }
1846
/* Program the copper PHY from bp->autoneg / bp->req_* settings.
 *
 * Autoneg path: rebuild the 10/100 advertisement (mii_adv) and the
 * 1000BASE-T advertisement (MII_CTRL1000) from bp->advertising plus
 * pause bits, restarting autonegotiation only if something actually
 * changed or BMCR had autoneg disabled.
 *
 * Forced path: compute the BMCR for the requested speed/duplex; if it
 * differs and link is currently up, force the link down first
 * (BMCR_LOOPBACK) so the peer sees a transition.
 *
 * NOTE(review): the spin_unlock_bh/spin_lock_bh pair around the sleep
 * implies this is entered with bp->phy_lock held — confirm at callers.
 *
 * Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask current advertisements down to the bits we manage
		 * so the compare below ignores unrelated register bits.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only kick off a new autoneg cycle when needed. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1943
1944 static int
1945 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1946 {
1947         if (bp->loopback == MAC_LOOPBACK)
1948                 return 0;
1949
1950         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1951                 return (bnx2_setup_serdes_phy(bp, port));
1952         }
1953         else {
1954                 return (bnx2_setup_copper_phy(bp));
1955         }
1956 }
1957
/* Initialize the 5709 SerDes PHY.  The standard MII registers live at
 * an offset of 0x10 on this part, so the cached mii_* offsets are
 * redirected first; the rest is a fixed sequence of block-addressed
 * register writes (order matters: each MII_BNX2_BLK_ADDR write selects
 * the register bank the following accesses hit).
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* 5709S register map: IEEE registers shifted by 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Disable media auto-detect and force fibre mode. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Enable clause-73 BAM autoneg support. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected for normal operation. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2007
/* Initialize the 5708 SerDes PHY: select IEEE-compliant digital mode,
 * fibre mode with auto-detect, PLL early-detect, 2.5G advertisement
 * when capable, a TX-amplitude bump on early chip revisions, and the
 * NVRAM-specified TX control value on backplane designs.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* 2.5G advertisement register for this part. */
	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the NVRAM TX control value, but only for backplane
	 * (blade) hardware.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2065
2066 static int
2067 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2068 {
2069         if (reset_phy)
2070                 bnx2_reset_phy(bp);
2071
2072         bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2073
2074         if (CHIP_NUM(bp) == CHIP_NUM_5706)
2075                 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2076
2077         if (bp->dev->mtu > 1500) {
2078                 u32 val;
2079
2080                 /* Set extended packet length bit */
2081                 bnx2_write_phy(bp, 0x18, 0x7);
2082                 bnx2_read_phy(bp, 0x18, &val);
2083                 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2084
2085                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2086                 bnx2_read_phy(bp, 0x1c, &val);
2087                 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2088         }
2089         else {
2090                 u32 val;
2091
2092                 bnx2_write_phy(bp, 0x18, 0x7);
2093                 bnx2_read_phy(bp, 0x18, &val);
2094                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2095
2096                 bnx2_write_phy(bp, 0x1c, 0x6c00);
2097                 bnx2_read_phy(bp, 0x1c, &val);
2098                 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2099         }
2100
2101         return 0;
2102 }
2103
/* Initialize the copper PHY: apply the CRC-fix shadow-register
 * sequence when flagged, optionally disable early DAC, set or clear
 * the extended-packet-length bits according to the MTU, and enable
 * ethernet@wirespeed.  The 0x15/0x17/0x18/0x1c accesses below are
 * vendor shadow registers programmed with magic values from Broadcom.
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Vendor-supplied workaround sequence. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard frames: clear the extended-length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2155
2156
2157 static int
2158 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2159 {
2160         u32 val;
2161         int rc = 0;
2162
2163         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2164         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2165
2166         bp->mii_bmcr = MII_BMCR;
2167         bp->mii_bmsr = MII_BMSR;
2168         bp->mii_bmsr1 = MII_BMSR;
2169         bp->mii_adv = MII_ADVERTISE;
2170         bp->mii_lpa = MII_LPA;
2171
2172         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2173
2174         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2175                 goto setup_phy;
2176
2177         bnx2_read_phy(bp, MII_PHYSID1, &val);
2178         bp->phy_id = val << 16;
2179         bnx2_read_phy(bp, MII_PHYSID2, &val);
2180         bp->phy_id |= val & 0xffff;
2181
2182         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2183                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2184                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2185                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2186                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2187                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2188                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2189         }
2190         else {
2191                 rc = bnx2_init_copper_phy(bp, reset_phy);
2192         }
2193
2194 setup_phy:
2195         if (!rc)
2196                 rc = bnx2_setup_phy(bp, bp->phy_port);
2197
2198         return rc;
2199 }
2200
2201 static int
2202 bnx2_set_mac_loopback(struct bnx2 *bp)
2203 {
2204         u32 mac_mode;
2205
2206         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2207         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2208         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2209         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2210         bp->link_up = 1;
2211         return 0;
2212 }
2213
2214 static int bnx2_test_link(struct bnx2 *);
2215
2216 static int
2217 bnx2_set_phy_loopback(struct bnx2 *bp)
2218 {
2219         u32 mac_mode;
2220         int rc, i;
2221
2222         spin_lock_bh(&bp->phy_lock);
2223         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2224                             BMCR_SPEED1000);
2225         spin_unlock_bh(&bp->phy_lock);
2226         if (rc)
2227                 return rc;
2228
2229         for (i = 0; i < 10; i++) {
2230                 if (bnx2_test_link(bp) == 0)
2231                         break;
2232                 msleep(100);
2233         }
2234
2235         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2236         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2237                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2238                       BNX2_EMAC_MODE_25G_MODE);
2239
2240         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2241         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2242         bp->link_up = 1;
2243         return 0;
2244 }
2245
/* Post a message to the bootcode through the DRV_MB mailbox and wait
 * up to FW_ACK_TIME_OUT_MS for the firmware to echo the sequence
 * number back in FW_MB.
 *
 * @msg_data: message code/data; the driver sequence number is OR'ed in.
 * @silent:   when non-zero, suppress the timeout printk.
 *
 * Returns 0 on success.  For WAIT0-type messages the ack result is
 * ignored and 0 is returned unconditionally after the wait.  Returns
 * -EBUSY on ack timeout (after notifying the firmware of the timeout),
 * -EIO if the firmware acked with a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware echoes the sequence number when done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2288
/* Initialize the 5709 context memory: kick the hardware memory-init,
 * then load each host context page into the page table, polling for
 * each write request to complete.
 * Returns 0, -EBUSY if the hardware does not respond in time, or
 * -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and start its memory init.
	 * Bit 12 and the page-size field are programmed together.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages are allocated elsewhere; a missing one is fatal. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the page's DMA address (low word with the valid
		 * bit, then high word) and request the table update.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to be consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2336
/* Zero out the on-chip context for all 96 VCIDs (pre-5709 parts).
 * On 5706 A0, VCIDs with bit 3 set map to a different physical CID,
 * so the physical address is remapped before zeroing.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 silicon: remap VCIDs with bit 3 set into the
			 * 0x60-based alternate physical CID space.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context is CTX_SIZE bytes, zeroed one PHY_CTX_SIZE
		 * window at a time.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2379
2380 static int
2381 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2382 {
2383         u16 *good_mbuf;
2384         u32 good_mbuf_cnt;
2385         u32 val;
2386
2387         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2388         if (good_mbuf == NULL) {
2389                 printk(KERN_ERR PFX "Failed to allocate memory in "
2390                                     "bnx2_alloc_bad_rbuf\n");
2391                 return -ENOMEM;
2392         }
2393
2394         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2395                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2396
2397         good_mbuf_cnt = 0;
2398
2399         /* Allocate a bunch of mbufs and save the good ones in an array. */
2400         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2401         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2402                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2403                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2404
2405                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2406
2407                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2408
2409                 /* The addresses with Bit 9 set are bad memory blocks. */
2410                 if (!(val & (1 << 9))) {
2411                         good_mbuf[good_mbuf_cnt] = (u16) val;
2412                         good_mbuf_cnt++;
2413                 }
2414
2415                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2416         }
2417
2418         /* Free the good ones back to the mbuf pool thus discarding
2419          * all the bad ones. */
2420         while (good_mbuf_cnt) {
2421                 good_mbuf_cnt--;
2422
2423                 val = good_mbuf[good_mbuf_cnt];
2424                 val = (val << 9) | val | 1;
2425
2426                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2427         }
2428         kfree(good_mbuf);
2429         return 0;
2430 }
2431
2432 static void
2433 bnx2_set_mac_addr(struct bnx2 *bp)
2434 {
2435         u32 val;
2436         u8 *mac_addr = bp->dev->dev_addr;
2437
2438         val = (mac_addr[0] << 8) | mac_addr[1];
2439
2440         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2441
2442         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2443                 (mac_addr[4] << 8) | mac_addr[5];
2444
2445         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2446 }
2447
2448 static inline int
2449 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2450 {
2451         dma_addr_t mapping;
2452         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2453         struct rx_bd *rxbd =
2454                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2455         struct page *page = alloc_page(GFP_ATOMIC);
2456
2457         if (!page)
2458                 return -ENOMEM;
2459         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2460                                PCI_DMA_FROMDEVICE);
2461         rx_pg->page = page;
2462         pci_unmap_addr_set(rx_pg, mapping, mapping);
2463         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2464         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2465         return 0;
2466 }
2467
2468 static void
2469 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2470 {
2471         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2472         struct page *page = rx_pg->page;
2473
2474         if (!page)
2475                 return;
2476
2477         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2478                        PCI_DMA_FROMDEVICE);
2479
2480         __free_page(page);
2481         rx_pg->page = NULL;
2482 }
2483
2484 static inline int
2485 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2486 {
2487         struct sk_buff *skb;
2488         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2489         dma_addr_t mapping;
2490         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2491         unsigned long align;
2492
2493         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2494         if (skb == NULL) {
2495                 return -ENOMEM;
2496         }
2497
2498         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2499                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2500
2501         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2502                 PCI_DMA_FROMDEVICE);
2503
2504         rx_buf->skb = skb;
2505         pci_unmap_addr_set(rx_buf, mapping, mapping);
2506
2507         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2508         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2509
2510         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2511
2512         return 0;
2513 }
2514
2515 static int
2516 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2517 {
2518         struct status_block *sblk = bnapi->status_blk;
2519         u32 new_link_state, old_link_state;
2520         int is_set = 1;
2521
2522         new_link_state = sblk->status_attn_bits & event;
2523         old_link_state = sblk->status_attn_bits_ack & event;
2524         if (new_link_state != old_link_state) {
2525                 if (new_link_state)
2526                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2527                 else
2528                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2529         } else
2530                 is_set = 0;
2531
2532         return is_set;
2533 }
2534
/* Service PHY/link attention events reported in the status block.
 * Runs from NAPI context; phy_lock serializes against other PHY users.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        /* Local link state change. */
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        /* Timer-abort attention, handled by the remote-link path. */
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2548
2549 static inline u16
2550 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2551 {
2552         u16 cons;
2553
2554         if (bnapi->int_num == 0)
2555                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2556         else
2557                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2558
2559         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2560                 cons++;
2561         return cons;
2562 }
2563
/* Reclaim completed tx descriptors for this vector.
 *
 * Walks the tx ring from the driver's consumer index up to the index the
 * hardware has completed, unmapping and freeing each transmitted skb,
 * then wakes the netdev queue if it was stopped and enough ring space is
 * free again.  Returns the number of packets reclaimed (at most @budget;
 * a budget of 0 effectively means unlimited, since tx_pkt is incremented
 * before the comparison).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0;

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        /* Compute the index of this packet's last BD; if
                         * the hardware has not completed it yet, stop and
                         * finish this packet on a later pass.
                         */
                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                /* Account for the skipped last ring entry. */
                                last_idx++;
                        }
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                /* Unmap the linear part of the skb. */
                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap each page fragment's BD. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                /* Re-check under netif_tx_lock to close the race with
                 * bnx2_start_xmit() stopping the queue concurrently.
                 */
                netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
        return tx_pkt;
}
2646
/* Return @count rx page ring entries from the consumer side back to the
 * producer side (typically after a partial allocation failure) so that
 * no ring slots are leaked.  If @skb is non-NULL, its last page fragment
 * is detached, re-mapped for DMA, placed back in the consumer slot, and
 * the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        dma_addr_t mapping;
        int i;
        u16 hw_prod = rxr->rx_pg_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (i == 0 && skb) {
                        /* Reclaim the skb's last page fragment into the
                         * consumer slot before recycling it below.
                         */
                        struct page *page;
                        struct skb_shared_info *shinfo;

                        shinfo = skb_shinfo(skb);
                        shinfo->nr_frags--;
                        page = shinfo->frags[shinfo->nr_frags].page;
                        shinfo->frags[shinfo->nr_frags].page = NULL;
                        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                                               PCI_DMA_FROMDEVICE);
                        cons_rx_pg->page = page;
                        pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
                        dev_kfree_skb(skb);
                }
                if (prod != cons) {
                        /* Move the page, its DMA mapping and the BD
                         * address from the consumer to the producer slot.
                         */
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2696
/* Recycle an rx buffer: hand the skb (and its DMA mapping) from ring
 * slot @cons back to the hardware at producer slot @prod.  Used when the
 * packet is dropped or its data has been copied elsewhere.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Return ownership of the header area (synced for the CPU in
         * bnx2_rx_int) back to the device before it reuses the buffer.
         */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: the descriptor already points at this buffer. */
        if (cons == prod)
                return;

        /* Move the DMA mapping and BD address to the producer slot. */
        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2726
/* Finalize the skb for a received packet.  @ring_idx packs
 * (cons << 16) | prod.  @len is the frame length with the 4-byte CRC
 * already subtracted by the caller; @hdr_len is nonzero for split/jumbo
 * frames whose tail lives in the page ring.  The buffer at @prod is
 * refilled first; on refill failure the skb (and any tail pages) are
 * recycled back to the rings and an error is returned, dropping the
 * packet.  Returns 0 on success.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, rxr, prod);
        if (unlikely(err)) {
                /* Refill failed: recycle the skb and any page-ring slots
                 * the frame's tail occupies, then drop.
                 */
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* +4 restores the CRC the caller stripped. */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Whole frame fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                /* Tail of the frame lives in page-ring buffers; attach
                 * them as skb fragments and refill each slot as we go.
                 */
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                /* Remaining bytes are only (part of) the
                                 * CRC: trim it and recycle unused pages.
                                 */
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        /* Strip the CRC from the final fragment. */
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                /* Refill failed mid-frame: unwind by
                                 * recycling the skb's last page plus the
                                 * remaining slots, then drop.
                                 */
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
2819
2820 static inline u16
2821 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2822 {
2823         u16 cons;
2824
2825         if (bnapi->int_num == 0)
2826                 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2827         else
2828                 cons = bnapi->status_blk_msix->status_rx_quick_consumer_index;
2829
2830         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2831                 cons++;
2832         return cons;
2833 }
2834
/* Main rx processing loop, run from NAPI.  Drains completed rx
 * descriptors up to @budget packets: validates the frame header, copies
 * small packets into a fresh skb, builds paged skbs for split/jumbo
 * frames, applies checksum and VLAN offload results, and passes packets
 * up the stack.  Finally publishes the new producer indices to the chip.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Sync only the frame header area for the CPU; a full
                 * unmap happens later only if we keep this buffer.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                /* The chip prepends an l2_fhdr with length/status info. */
                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len;

                /* Drop frames with any rx error; recycle the buffer. */
                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        goto next_rx;
                }
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        /* Header/data split: the hw reports the header
                         * length in the ip_xsum field in this mode.
                         */
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        /* Jumbo frame: the tail lives in the page ring. */
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Strip the 4-byte frame CRC included in pkt_len. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        /* Small packet: copy into a fresh skb so the
                         * original rx buffer can be recycled in place.
                         */
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless VLAN tagged (0x8100 is
                 * the 802.1Q ethertype), which legitimately adds bytes.
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Propagate the hw checksum verdict when rx_csum is on. */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        /* Publish the new producer indices and byte sequence to the chip. */
        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
2982
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

        prefetch(bnapi->status_blk);
        /* Mask further interrupts; the NAPI poll re-enables them. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev, &bnapi->napi);

        return IRQ_HANDLED;
}
3006
3007 static irqreturn_t
3008 bnx2_msi_1shot(int irq, void *dev_instance)
3009 {
3010         struct net_device *dev = dev_instance;
3011         struct bnx2 *bp = netdev_priv(dev);
3012         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3013
3014         prefetch(bnapi->status_blk);
3015
3016         /* Return here if interrupt is disabled. */
3017         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3018                 return IRQ_HANDLED;
3019
3020         netif_rx_schedule(dev, &bnapi->napi);
3021
3022         return IRQ_HANDLED;
3023 }
3024
/* INTx ISR (line may be shared with other devices).  Verifies the
 * interrupt is really ours, acks and masks it, then hands processing
 * to NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct status_block *sblk = bnapi->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Mask further interrupts; the NAPI poll re-enables them. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
                /* Latch the status index before scheduling so the poll
                 * loop can detect subsequent status block updates.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bnapi->napi);
        }

        return IRQ_HANDLED;
}
3064
3065 static irqreturn_t
3066 bnx2_tx_msix(int irq, void *dev_instance)
3067 {
3068         struct net_device *dev = dev_instance;
3069         struct bnx2 *bp = netdev_priv(dev);
3070         struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
3071
3072         prefetch(bnapi->status_blk_msix);
3073
3074         /* Return here if interrupt is disabled. */
3075         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3076                 return IRQ_HANDLED;
3077
3078         netif_rx_schedule(dev, &bnapi->napi);
3079         return IRQ_HANDLED;
3080 }
3081
3082 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3083                                  STATUS_ATTN_BITS_TIMER_ABORT)
3084
3085 static inline int
3086 bnx2_has_work(struct bnx2_napi *bnapi)
3087 {
3088         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3089         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3090         struct status_block *sblk = bnapi->status_blk;
3091
3092         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3093             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3094                 return 1;
3095
3096         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3097             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3098                 return 1;
3099
3100         return 0;
3101 }
3102
/* NAPI poll handler for the tx-only MSI-X vector: reclaim completed tx
 * descriptors until the budget is exhausted or the hardware reports no
 * more completions, then complete NAPI and re-arm this vector's
 * interrupt.  Returns the amount of work done.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk_msix;

        do {
                work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
                if (unlikely(work_done >= budget))
                        return work_done;

                /* Record the status index seen before re-checking for
                 * work; the INT_ACK_CMD write below reports it to the hw.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
        } while (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons);

        netif_rx_complete(bp->dev, napi);
        /* Re-arm this vector, acking up to last_status_idx. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               bnapi->last_status_idx);
        return work_done;
}
3126
/* One pass of NAPI work: service link attention events, reclaim tx
 * completions, then process rx packets up to the remaining budget.
 * Returns the updated work_done count; only rx work is counted against
 * the NAPI budget.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        struct status_block *sblk = bnapi->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        /* tx reclaim is unbudgeted: a budget of 0 never matches the
         * incremented packet count in bnx2_tx_int().
         */
        if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
                bnx2_tx_int(bp, bnapi, 0);

        if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
3157
/* Main NAPI poll handler.  Repeats bnx2_poll_work() until the budget is
 * exhausted or no work remains, then completes NAPI and re-enables
 * interrupts, reporting the last status index seen to the chip.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        netif_rx_complete(bp->dev, napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                /* MSI/MSI-X: a single write un-masks. */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: update the index with interrupts still
                         * masked, then un-mask with a second write.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3199
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC rx mode (promiscuous / VLAN-tag stripping), the
 * multicast hash filter, and the rx sort engine from dev->flags and the
 * device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with promiscuous and keep-VLAN-tag
         * cleared; both are re-added below as needed.
         */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in rx frames only when no VLAN group is
         * registered and ASF management firmware is not active.
         */
        if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash filter bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Hash each address (low CRC byte) into one bit
                         * of the 8 x 32-bit multicast hash filter.
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Only touch the rx mode register when the mode changed. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, reprogram, then re-enable the rx sort engine. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3274
/* Download an RV2P firmware image into the selected RV2P processor
 * (@rv2p_proc is RV2P_PROC1 or RV2P_PROC2).  Instructions are written
 * as 64-bit pairs through the INSTR_HIGH/INSTR_LOW registers and
 * committed with an ADDR_CMD write.  The processor is left in reset;
 * the caller's init sequence un-stalls it later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
        u32 rv2p_proc)
{
        int i;
        u32 val;

        /* 5709 (XI): patch the PROC2 image's max-BD-per-page value
         * before downloading.
         */
        if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
                val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
                val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
                rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
        }

        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
                rv2p_code++;

                /* Commit the 64-bit instruction at word offset i/8. */
                if (rv2p_proc == RV2P_PROC1) {
                        val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
                }
                else {
                        val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }
}
3313
/* Load a firmware image onto one of the chip's embedded MIPS CPUs.
 *
 * The CPU is halted, each firmware section (text/data/sbss/bss/rodata)
 * is written into the CPU's scratchpad through indirect register
 * writes, and the CPU is then restarted at fw->start_addr.
 *
 * The caller must point fw->text at a scratch buffer of at least
 * FW_BUF_SIZE bytes when fw->gz_text is set (see bnx2_init_cpus()).
 *
 * Returns 0 on success or a negative errno from decompression.
 */
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area.  Section addresses are MIPS virtual
	 * addresses; convert to a scratchpad offset first.
	 */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* Text ships zlib-compressed; inflate into fw->text. */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		/* Decompressed words are little-endian on-disk. */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area.
	 * NOTE(review): unlike text, data words are written without
	 * le32_to_cpu — presumably stored in target order already;
	 * confirm against bnx2_fw.h.
	 */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area (written raw, like data). */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction and set the entry point. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU: clear the halt bit and the state register,
	 * then write the new mode last so the CPU runs from here.
	 */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3395
3396 static int
3397 bnx2_init_cpus(struct bnx2 *bp)
3398 {
3399         struct fw_info *fw;
3400         int rc, rv2p_len;
3401         void *text, *rv2p;
3402
3403         /* Initialize the RV2P processor. */
3404         text = vmalloc(FW_BUF_SIZE);
3405         if (!text)
3406                 return -ENOMEM;
3407         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3408                 rv2p = bnx2_xi_rv2p_proc1;
3409                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3410         } else {
3411                 rv2p = bnx2_rv2p_proc1;
3412                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3413         }
3414         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3415         if (rc < 0)
3416                 goto init_cpu_err;
3417
3418         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3419
3420         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3421                 rv2p = bnx2_xi_rv2p_proc2;
3422                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3423         } else {
3424                 rv2p = bnx2_rv2p_proc2;
3425                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3426         }
3427         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3428         if (rc < 0)
3429                 goto init_cpu_err;
3430
3431         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3432
3433         /* Initialize the RX Processor. */
3434         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3435                 fw = &bnx2_rxp_fw_09;
3436         else
3437                 fw = &bnx2_rxp_fw_06;
3438
3439         fw->text = text;
3440         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3441         if (rc)
3442                 goto init_cpu_err;
3443
3444         /* Initialize the TX Processor. */
3445         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3446                 fw = &bnx2_txp_fw_09;
3447         else
3448                 fw = &bnx2_txp_fw_06;
3449
3450         fw->text = text;
3451         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3452         if (rc)
3453                 goto init_cpu_err;
3454
3455         /* Initialize the TX Patch-up Processor. */
3456         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3457                 fw = &bnx2_tpat_fw_09;
3458         else
3459                 fw = &bnx2_tpat_fw_06;
3460
3461         fw->text = text;
3462         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3463         if (rc)
3464                 goto init_cpu_err;
3465
3466         /* Initialize the Completion Processor. */
3467         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3468                 fw = &bnx2_com_fw_09;
3469         else
3470                 fw = &bnx2_com_fw_06;
3471
3472         fw->text = text;
3473         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3474         if (rc)
3475                 goto init_cpu_err;
3476
3477         /* Initialize the Command Processor. */
3478         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3479                 fw = &bnx2_cp_fw_09;
3480         else
3481                 fw = &bnx2_cp_fw_06;
3482
3483         fw->text = text;
3484         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3485
3486 init_cpu_err:
3487         vfree(text);
3488         return rc;
3489 }
3490
/* Transition the device between PCI power states.
 *
 * PCI_D0: wake the chip (clearing any pending PME status), waiting out
 * the mandated D3hot exit delay, and disable the wake-up packet modes.
 *
 * PCI_D3hot: if WOL is enabled, reprogram the PHY/MAC for low-speed
 * wake-up, accept all multicast, and enable the blocks needed to parse
 * magic packets; then inform the firmware and drop into D3hot.
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field (-> D0) and write 1 to
		 * PME_STATUS to acknowledge/clear any pending wake event.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear latched wake indications and leave magic-packet
		 * detection off while running.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * the low-power link, restoring the user settings
			 * afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort user 0: disable, configure for
			 * broadcast + multicast, then re-enable.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending, unless WOL is
		 * known to be unusable on this board.
		 */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* 3 is the D3hot encoding of the PMCSR power-state
		 * field.  On 5706 A0/A1, only enter D3hot when WOL is
		 * requested — NOTE(review): presumably a chip erratum;
		 * confirm against the 5706 errata list.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3627
3628 static int
3629 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3630 {
3631         u32 val;
3632         int j;
3633
3634         /* Request access to the flash interface. */
3635         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3636         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3637                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3638                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3639                         break;
3640
3641                 udelay(5);
3642         }
3643
3644         if (j >= NVRAM_TIMEOUT_COUNT)
3645                 return -EBUSY;
3646
3647         return 0;
3648 }
3649
3650 static int
3651 bnx2_release_nvram_lock(struct bnx2 *bp)
3652 {
3653         int j;
3654         u32 val;
3655
3656         /* Relinquish nvram interface. */
3657         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3658
3659         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3660                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3661                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3662                         break;
3663
3664                 udelay(5);
3665         }
3666
3667         if (j >= NVRAM_TIMEOUT_COUNT)
3668                 return -EBUSY;
3669
3670         return 0;
3671 }
3672
3673
3674 static int
3675 bnx2_enable_nvram_write(struct bnx2 *bp)
3676 {
3677         u32 val;
3678
3679         val = REG_RD(bp, BNX2_MISC_CFG);
3680         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3681
3682         if (bp->flash_info->flags & BNX2_NV_WREN) {
3683                 int j;
3684
3685                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3686                 REG_WR(bp, BNX2_NVM_COMMAND,
3687                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3688
3689                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3690                         udelay(5);
3691
3692                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3693                         if (val & BNX2_NVM_COMMAND_DONE)
3694                                 break;
3695                 }
3696
3697                 if (j >= NVRAM_TIMEOUT_COUNT)
3698                         return -EBUSY;
3699         }
3700         return 0;
3701 }
3702
3703 static void
3704 bnx2_disable_nvram_write(struct bnx2 *bp)
3705 {
3706         u32 val;
3707
3708         val = REG_RD(bp, BNX2_MISC_CFG);
3709         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3710 }
3711
3712
3713 static void
3714 bnx2_enable_nvram_access(struct bnx2 *bp)
3715 {
3716         u32 val;
3717
3718         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3719         /* Enable both bits, even on read. */
3720         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3721                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3722 }
3723
3724 static void
3725 bnx2_disable_nvram_access(struct bnx2 *bp)
3726 {
3727         u32 val;
3728
3729         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3730         /* Disable both bits, even after read. */
3731         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3732                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3733                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3734 }
3735
3736 static int
3737 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3738 {
3739         u32 cmd;
3740         int j;
3741
3742         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3743                 /* Buffered flash, no erase needed */
3744                 return 0;
3745
3746         /* Build an erase command */
3747         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3748               BNX2_NVM_COMMAND_DOIT;
3749
3750         /* Need to clear DONE bit separately. */
3751         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3752
3753         /* Address of the NVRAM to read from. */
3754         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3755
3756         /* Issue an erase command. */
3757         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3758
3759         /* Wait for completion. */
3760         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3761                 u32 val;
3762
3763                 udelay(5);
3764
3765                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3766                 if (val & BNX2_NVM_COMMAND_DONE)
3767                         break;
3768         }
3769
3770         if (j >= NVRAM_TIMEOUT_COUNT)
3771                 return -EBUSY;
3772
3773         return 0;
3774 }
3775
3776 static int
3777 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3778 {
3779         u32 cmd;
3780         int j;
3781
3782         /* Build the command word. */
3783         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3784
3785         /* Calculate an offset of a buffered flash, not needed for 5709. */
3786         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3787                 offset = ((offset / bp->flash_info->page_size) <<
3788                            bp->flash_info->page_bits) +
3789                           (offset % bp->flash_info->page_size);
3790         }
3791
3792         /* Need to clear DONE bit separately. */
3793         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3794
3795         /* Address of the NVRAM to read from. */
3796         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3797
3798         /* Issue a read command. */
3799         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3800
3801         /* Wait for completion. */
3802         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3803                 u32 val;
3804
3805                 udelay(5);
3806
3807                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3808                 if (val & BNX2_NVM_COMMAND_DONE) {
3809                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3810                         memcpy(ret_val, &v, 4);
3811                         break;
3812                 }
3813         }
3814         if (j >= NVRAM_TIMEOUT_COUNT)
3815                 return -EBUSY;
3816
3817         return 0;
3818 }
3819
3820
3821 static int
3822 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3823 {
3824         u32 cmd;
3825         __be32 val32;
3826         int j;
3827
3828         /* Build the command word. */
3829         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3830
3831         /* Calculate an offset of a buffered flash, not needed for 5709. */
3832         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3833                 offset = ((offset / bp->flash_info->page_size) <<
3834                           bp->flash_info->page_bits) +
3835                          (offset % bp->flash_info->page_size);
3836         }
3837
3838         /* Need to clear DONE bit separately. */
3839         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3840
3841         memcpy(&val32, val, 4);
3842
3843         /* Write the data. */
3844         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3845
3846         /* Address of the NVRAM to write to. */
3847         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3848
3849         /* Issue the write command. */
3850         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3851
3852         /* Wait for completion. */
3853         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3854                 udelay(5);
3855
3856                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3857                         break;
3858         }
3859         if (j >= NVRAM_TIMEOUT_COUNT)
3860                 return -EBUSY;
3861
3862         return 0;
3863 }
3864
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * On 5709 the interface is fixed.  On older chips the NVM_CFG1
 * register is decoded: bit 30 set means firmware already reconfigured
 * the interface (match on config1), otherwise match the power-on
 * strapping and program the part's config into the chip.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or an error
 * from the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a single known flash interface; skip strap decoding. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap encoding.
		 * NOTE(review): presumably a board strap option — confirm
		 * against the NVM_CFG1 register definition.
		 */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop broke out: the part is unknown. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared memory; fall back to the
	 * table's total size when firmware does not provide one.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3947
3948 static int
3949 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3950                 int buf_size)
3951 {
3952         int rc = 0;
3953         u32 cmd_flags, offset32, len32, extra;
3954
3955         if (buf_size == 0)
3956                 return 0;
3957
3958         /* Request access to the flash interface. */
3959         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3960                 return rc;
3961
3962         /* Enable access to flash interface */
3963         bnx2_enable_nvram_access(bp);
3964
3965         len32 = buf_size;
3966         offset32 = offset;
3967         extra = 0;
3968
3969         cmd_flags = 0;
3970
3971         if (offset32 & 3) {
3972                 u8 buf[4];
3973                 u32 pre_len;
3974
3975                 offset32 &= ~3;
3976                 pre_len = 4 - (offset & 3);
3977
3978                 if (pre_len >= len32) {
3979                         pre_len = len32;
3980                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3981                                     BNX2_NVM_COMMAND_LAST;
3982                 }
3983                 else {
3984                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3985                 }
3986
3987                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3988
3989                 if (rc)
3990                         return rc;
3991
3992                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3993
3994                 offset32 += 4;
3995                 ret_buf += pre_len;
3996                 len32 -= pre_len;
3997         }
3998         if (len32 & 3) {
3999                 extra = 4 - (len32 & 3);
4000                 len32 = (len32 + 4) & ~3;
4001         }
4002
4003         if (len32 == 4) {
4004                 u8 buf[4];
4005
4006                 if (cmd_flags)
4007                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4008                 else
4009                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4010                                     BNX2_NVM_COMMAND_LAST;
4011
4012                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4013
4014                 memcpy(ret_buf, buf, 4 - extra);
4015         }
4016         else if (len32 > 0) {
4017                 u8 buf[4];
4018
4019                 /* Read the first word. */
4020                 if (cmd_flags)
4021                         cmd_flags = 0;
4022                 else
4023                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4024
4025                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4026
4027                 /* Advance to the next dword. */
4028                 offset32 += 4;
4029                 ret_buf += 4;
4030                 len32 -= 4;
4031
4032                 while (len32 > 4 && rc == 0) {
4033                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4034
4035                         /* Advance to the next dword. */
4036                         offset32 += 4;
4037                         ret_buf += 4;
4038                         len32 -= 4;
4039                 }
4040
4041                 if (rc)
4042                         return rc;
4043
4044                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4045                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4046
4047                 memcpy(ret_buf, buf, 4 - extra);
4048         }
4049
4050         /* Disable access to flash interface */
4051         bnx2_disable_nvram_access(bp);
4052
4053         bnx2_release_nvram_lock(bp);
4054
4055         return rc;
4056 }
4057
4058 static int
4059 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4060                 int buf_size)
4061 {
4062         u32 written, offset32, len32;
4063         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4064         int rc = 0;
4065         int align_start, align_end;
4066
4067         buf = data_buf;
4068         offset32 = offset;
4069         len32 = buf_size;
4070         align_start = align_end = 0;
4071
4072         if ((align_start = (offset32 & 3))) {
4073                 offset32 &= ~3;
4074                 len32 += align_start;
4075                 if (len32 < 4)
4076                         len32 = 4;
4077                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4078                         return rc;
4079         }
4080
4081         if (len32 & 3) {
4082                 align_end = 4 - (len32 & 3);
4083                 len32 += align_end;
4084                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4085                         return rc;
4086         }
4087
4088         if (align_start || align_end) {
4089                 align_buf = kmalloc(len32, GFP_KERNEL);
4090                 if (align_buf == NULL)
4091                         return -ENOMEM;
4092                 if (align_start) {
4093                         memcpy(align_buf, start, 4);
4094                 }
4095                 if (align_end) {
4096                         memcpy(align_buf + len32 - 4, end, 4);
4097                 }
4098                 memcpy(align_buf + align_start, data_buf, buf_size);
4099                 buf = align_buf;
4100         }
4101
4102         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4103                 flash_buffer = kmalloc(264, GFP_KERNEL);
4104                 if (flash_buffer == NULL) {
4105                         rc = -ENOMEM;
4106                         goto nvram_write_end;
4107                 }
4108         }
4109
4110         written = 0;
4111         while ((written < len32) && (rc == 0)) {
4112                 u32 page_start, page_end, data_start, data_end;
4113                 u32 addr, cmd_flags;
4114                 int i;
4115
4116                 /* Find the page_start addr */
4117                 page_start = offset32 + written;
4118                 page_start -= (page_start % bp->flash_info->page_size);
4119                 /* Find the page_end addr */
4120                 page_end = page_start + bp->flash_info->page_size;
4121                 /* Find the data_start addr */
4122                 data_start = (written == 0) ? offset32 : page_start;
4123                 /* Find the data_end addr */
4124                 data_end = (page_end > offset32 + len32) ?
4125                         (offset32 + len32) : page_end;
4126
4127                 /* Request access to the flash interface. */
4128                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4129                         goto nvram_write_end;
4130
4131                 /* Enable access to flash interface */
4132                 bnx2_enable_nvram_access(bp);
4133
4134                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4135                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4136                         int j;
4137
4138                         /* Read the whole page into the buffer
4139                          * (non-buffer flash only) */
4140                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4141                                 if (j == (bp->flash_info->page_size - 4)) {
4142                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4143                                 }
4144                                 rc = bnx2_nvram_read_dword(bp,
4145                                         page_start + j,
4146                                         &flash_buffer[j],
4147                                         cmd_flags);
4148
4149                                 if (rc)
4150                                         goto nvram_write_end;
4151
4152                                 cmd_flags = 0;
4153                         }
4154                 }
4155
4156                 /* Enable writes to flash interface (unlock write-protect) */
4157                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4158                         goto nvram_write_end;
4159
4160                 /* Loop to write back the buffer data from page_start to
4161                  * data_start */
4162                 i = 0;
4163                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4164                         /* Erase the page */
4165                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4166                                 goto nvram_write_end;
4167
4168                         /* Re-enable the write again for the actual write */
4169                         bnx2_enable_nvram_write(bp);
4170
4171                         for (addr = page_start; addr < data_start;
4172                                 addr += 4, i += 4) {
4173
4174                                 rc = bnx2_nvram_write_dword(bp, addr,
4175                                         &flash_buffer[i], cmd_flags);
4176
4177                                 if (rc != 0)
4178                                         goto nvram_write_end;
4179
4180                                 cmd_flags = 0;
4181                         }
4182                 }
4183
4184                 /* Loop to write the new data from data_start to data_end */
4185                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4186                         if ((addr == page_end - 4) ||
4187                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4188                                  (addr == data_end - 4))) {
4189
4190                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4191                         }
4192                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4193                                 cmd_flags);
4194
4195                         if (rc != 0)
4196                                 goto nvram_write_end;
4197
4198                         cmd_flags = 0;
4199                         buf += 4;
4200                 }
4201
4202                 /* Loop to write back the buffer data from data_end
4203                  * to page_end */
4204                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4205                         for (addr = data_end; addr < page_end;
4206                                 addr += 4, i += 4) {
4207
4208                                 if (addr == page_end-4) {
4209                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4210                                 }
4211                                 rc = bnx2_nvram_write_dword(bp, addr,
4212                                         &flash_buffer[i], cmd_flags);
4213
4214                                 if (rc != 0)
4215                                         goto nvram_write_end;
4216
4217                                 cmd_flags = 0;
4218                         }
4219                 }
4220
4221                 /* Disable writes to flash interface (lock write-protect) */
4222                 bnx2_disable_nvram_write(bp);
4223
4224                 /* Disable access to flash interface */
4225                 bnx2_disable_nvram_access(bp);
4226                 bnx2_release_nvram_lock(bp);
4227
4228                 /* Increment written */
4229                 written += data_end - data_start;
4230         }
4231
4232 nvram_write_end:
4233         kfree(flash_buffer);
4234         kfree(align_buf);
4235         return rc;
4236 }
4237
4238 static void
4239 bnx2_init_remote_phy(struct bnx2 *bp)
4240 {
4241         u32 val;
4242
4243         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4244         if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4245                 return;
4246
4247         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4248         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4249                 return;
4250
4251         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4252                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4253
4254                 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4255                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4256                         bp->phy_port = PORT_FIBRE;
4257                 else
4258                         bp->phy_port = PORT_TP;
4259
4260                 if (netif_running(bp->dev)) {
4261                         u32 sig;
4262
4263                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4264                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4265                         bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4266                 }
4267         }
4268 }
4269
4270 static void
4271 bnx2_setup_msix_tbl(struct bnx2 *bp)
4272 {
4273         REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4274
4275         REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4276         REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4277 }
4278
4279 static int
4280 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4281 {
4282         u32 val;
4283         int i, rc = 0;
4284         u8 old_port;
4285
4286         /* Wait for the current PCI transaction to complete before
4287          * issuing a reset. */
4288         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4289                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4290                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4291                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4292                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4293         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4294         udelay(5);
4295
4296         /* Wait for the firmware to tell us it is ok to issue a reset. */
4297         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4298
4299         /* Deposit a driver reset signature so the firmware knows that
4300          * this is a soft reset. */
4301         bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4302                       BNX2_DRV_RESET_SIGNATURE_MAGIC);
4303
4304         /* Do a dummy read to force the chip to complete all current transaction
4305          * before we issue a reset. */
4306         val = REG_RD(bp, BNX2_MISC_ID);
4307
4308         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4309                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4310                 REG_RD(bp, BNX2_MISC_COMMAND);
4311                 udelay(5);
4312
4313                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4314                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4315
4316                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4317
4318         } else {
4319                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4320                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4321                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4322
4323                 /* Chip reset. */
4324                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4325
4326                 /* Reading back any register after chip reset will hang the
4327                  * bus on 5706 A0 and A1.  The msleep below provides plenty
4328                  * of margin for write posting.
4329                  */
4330                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4331                     (CHIP_ID(bp) == CHIP_ID_5706_A1))
4332                         msleep(20);
4333
4334                 /* Reset takes approximate 30 usec */
4335                 for (i = 0; i < 10; i++) {
4336                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4337                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4338                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4339                                 break;
4340                         udelay(10);
4341                 }
4342
4343                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4344                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4345                         printk(KERN_ERR PFX "Chip reset did not complete\n");
4346                         return -EBUSY;
4347                 }
4348         }
4349
4350         /* Make sure byte swapping is properly configured. */
4351         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4352         if (val != 0x01020304) {
4353                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4354                 return -ENODEV;
4355         }
4356
4357         /* Wait for the firmware to finish its initialization. */
4358         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4359         if (rc)
4360                 return rc;
4361
4362         spin_lock_bh(&bp->phy_lock);
4363         old_port = bp->phy_port;
4364         bnx2_init_remote_phy(bp);
4365         if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4366             old_port != bp->phy_port)
4367                 bnx2_set_default_remote_link(bp);
4368         spin_unlock_bh(&bp->phy_lock);
4369
4370         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4371                 /* Adjust the voltage regular to two steps lower.  The default
4372                  * of this register is 0x0000000e. */
4373                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4374
4375                 /* Remove bad rbuf memory from the free pool. */
4376                 rc = bnx2_alloc_bad_rbuf(bp);
4377         }
4378
4379         if (bp->flags & BNX2_FLAG_USING_MSIX)
4380                 bnx2_setup_msix_tbl(bp);
4381
4382         return rc;
4383 }
4384
4385 static int
4386 bnx2_init_chip(struct bnx2 *bp)
4387 {
4388         u32 val;
4389         int rc, i;
4390
4391         /* Make sure the interrupt is not active. */
4392         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4393
4394         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4395               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4396 #ifdef __BIG_ENDIAN
4397               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4398 #endif
4399               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4400               DMA_READ_CHANS << 12 |
4401               DMA_WRITE_CHANS << 16;
4402
4403         val |= (0x2 << 20) | (1 << 11);
4404
4405         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4406                 val |= (1 << 23);
4407
4408         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4409             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4410                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4411
4412         REG_WR(bp, BNX2_DMA_CONFIG, val);
4413
4414         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4415                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4416                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4417                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4418         }
4419
4420         if (bp->flags & BNX2_FLAG_PCIX) {
4421                 u16 val16;
4422
4423                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4424                                      &val16);
4425                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4426                                       val16 & ~PCI_X_CMD_ERO);
4427         }
4428
4429         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4430                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4431                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4432                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4433
4434         /* Initialize context mapping and zero out the quick contexts.  The
4435          * context block must have already been enabled. */
4436         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4437                 rc = bnx2_init_5709_context(bp);
4438                 if (rc)
4439                         return rc;
4440         } else
4441                 bnx2_init_context(bp);
4442
4443         if ((rc = bnx2_init_cpus(bp)) != 0)
4444                 return rc;
4445
4446         bnx2_init_nvram(bp);
4447
4448         bnx2_set_mac_addr(bp);
4449
4450         val = REG_RD(bp, BNX2_MQ_CONFIG);
4451         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4452         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4453         if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4454                 val |= BNX2_MQ_CONFIG_HALT_DIS;
4455
4456         REG_WR(bp, BNX2_MQ_CONFIG, val);
4457
4458         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4459         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4460         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4461
4462         val = (BCM_PAGE_BITS - 8) << 24;
4463         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4464
4465         /* Configure page size. */
4466         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4467         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4468         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4469         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4470
4471         val = bp->mac_addr[0] +
4472               (bp->mac_addr[1] << 8) +
4473               (bp->mac_addr[2] << 16) +
4474               bp->mac_addr[3] +
4475               (bp->mac_addr[4] << 8) +
4476               (bp->mac_addr[5] << 16);
4477         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4478
4479         /* Program the MTU.  Also include 4 bytes for CRC32. */
4480         val = bp->dev->mtu + ETH_HLEN + 4;
4481         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4482                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4483         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4484
4485         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4486                 bp->bnx2_napi[i].last_status_idx = 0;
4487
4488         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4489
4490         /* Set up how to generate a link change interrupt. */
4491         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4492
4493         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4494                (u64) bp->status_blk_mapping & 0xffffffff);
4495         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4496
4497         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4498                (u64) bp->stats_blk_mapping & 0xffffffff);
4499         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4500                (u64) bp->stats_blk_mapping >> 32);
4501
4502         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4503                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4504
4505         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4506                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4507
4508         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4509                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4510
4511         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4512
4513         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4514
4515         REG_WR(bp, BNX2_HC_COM_TICKS,
4516                (bp->com_ticks_int << 16) | bp->com_ticks);
4517
4518         REG_WR(bp, BNX2_HC_CMD_TICKS,
4519                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4520
4521         if (CHIP_NUM(bp) == CHIP_NUM_5708)
4522                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4523         else
4524                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4525         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4526
4527         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4528                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4529         else {
4530                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4531                       BNX2_HC_CONFIG_COLLECT_STATS;
4532         }
4533
4534         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4535                 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4536                            BNX2_HC_SB_CONFIG_1;
4537
4538                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4539                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4540
4541                 REG_WR(bp, base,
4542                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4543                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4544
4545                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4546                         (bp->tx_quick_cons_trip_int << 16) |
4547                          bp->tx_quick_cons_trip);
4548
4549                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4550                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4551
4552                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4553         }
4554
4555         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4556                 val |= BNX2_HC_CONFIG_ONE_SHOT;
4557
4558         REG_WR(bp, BNX2_HC_CONFIG, val);
4559
4560         /* Clear internal stats counters. */
4561         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4562
4563         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4564
4565         /* Initialize the receive filter. */
4566         bnx2_set_rx_mode(bp->dev);
4567
4568         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4569                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4570                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4571                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4572         }
4573         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4574                           0);
4575
4576         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4577         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4578
4579         udelay(20);
4580
4581         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4582
4583         return rc;
4584 }
4585
4586 static void
4587 bnx2_clear_ring_states(struct bnx2 *bp)
4588 {
4589         struct bnx2_napi *bnapi;
4590         struct bnx2_tx_ring_info *txr;
4591         struct bnx2_rx_ring_info *rxr;
4592         int i;
4593
4594         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4595                 bnapi = &bp->bnx2_napi[i];
4596                 txr = &bnapi->tx_ring;
4597                 rxr = &bnapi->rx_ring;
4598
4599                 txr->tx_cons = 0;
4600                 txr->hw_tx_cons = 0;
4601                 rxr->rx_prod_bseq = 0;
4602                 rxr->rx_prod = 0;
4603                 rxr->rx_cons = 0;
4604                 rxr->rx_pg_prod = 0;
4605                 rxr->rx_pg_cons = 0;
4606         }
4607 }
4608
4609 static void
4610 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4611 {
4612         u32 val, offset0, offset1, offset2, offset3;
4613         u32 cid_addr = GET_CID_ADDR(cid);
4614
4615         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4616                 offset0 = BNX2_L2CTX_TYPE_XI;
4617                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4618                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4619                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4620         } else {
4621                 offset0 = BNX2_L2CTX_TYPE;
4622                 offset1 = BNX2_L2CTX_CMD_TYPE;
4623                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4624                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4625         }
4626         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4627         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4628
4629         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4630         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4631
4632         val = (u64) txr->tx_desc_mapping >> 32;
4633         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4634
4635         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4636         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4637 }
4638
4639 static void
4640 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4641 {
4642         struct tx_bd *txbd;
4643         u32 cid = TX_CID;
4644         struct bnx2_napi *bnapi;
4645         struct bnx2_tx_ring_info *txr;
4646
4647         bnapi = &bp->bnx2_napi[ring_num];
4648         txr = &bnapi->tx_ring;
4649
4650         if (ring_num == 0)
4651                 cid = TX_CID;
4652         else
4653                 cid = TX_TSS_CID + ring_num - 1;
4654
4655         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4656
4657         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4658
4659         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4660         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4661
4662         txr->tx_prod = 0;
4663         txr->tx_prod_bseq = 0;
4664
4665         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4666         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4667
4668         bnx2_init_tx_context(bp, cid, txr);
4669 }
4670
4671 static void
4672 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4673                      int num_rings)
4674 {
4675         int i;
4676         struct rx_bd *rxbd;
4677
4678         for (i = 0; i < num_rings; i++) {
4679                 int j;
4680
4681                 rxbd = &rx_ring[i][0];
4682                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4683                         rxbd->rx_bd_len = buf_size;
4684                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4685                 }
4686                 if (i == (num_rings - 1))
4687                         j = 0;
4688                 else
4689                         j = i + 1;
4690                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4691                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4692         }
4693 }
4694
4695 static void
4696 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4697 {
4698         int i;
4699         u16 prod, ring_prod;
4700         u32 cid, rx_cid_addr, val;
4701         struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4702         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4703
4704         if (ring_num == 0)
4705                 cid = RX_CID;
4706         else
4707                 cid = RX_RSS_CID + ring_num - 1;
4708
4709         rx_cid_addr = GET_CID_ADDR(cid);
4710
4711         bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4712                              bp->rx_buf_use_size, bp->rx_max_ring);
4713
4714         bnx2_init_rx_context(bp, cid);
4715
4716         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4717                 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4718                 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4719         }
4720
4721         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4722         if (bp->rx_pg_ring_size) {
4723                 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4724                                      rxr->rx_pg_desc_mapping,
4725                                      PAGE_SIZE, bp->rx_max_pg_ring);
4726                 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4727                 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4728                 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4729                        BNX2_L2CTX_RBDC_JUMBO_KEY);
4730
4731                 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4732                 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4733
4734                 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4735                 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4736
4737                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4738                         REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4739         }
4740
4741         val = (u64) rxr->rx_desc_mapping[0] >> 32;
4742         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4743
4744         val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4745         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4746
4747         ring_prod = prod = rxr->rx_pg_prod;
4748         for (i = 0; i < bp->rx_pg_ring_size; i++) {
4749                 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4750                         break;
4751                 prod = NEXT_RX_BD(prod);
4752                 ring_prod = RX_PG_RING_IDX(prod);
4753         }
4754         rxr->rx_pg_prod = prod;
4755
4756         ring_prod = prod = rxr->rx_prod;
4757         for (i = 0; i < bp->rx_ring_size; i++) {
4758                 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4759                         break;
4760                 prod = NEXT_RX_BD(prod);
4761                 ring_prod = RX_RING_IDX(prod);
4762         }
4763         rxr->rx_prod = prod;
4764
4765         rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4766         rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4767         rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4768
4769         REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4770         REG_WR16(bp, rxr->rx_bidx_addr, prod);
4771
4772         REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4773 }
4774
4775 static void
4776 bnx2_init_all_rings(struct bnx2 *bp)
4777 {
4778         int i;
4779
4780         bnx2_clear_ring_states(bp);
4781
4782         REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4783         for (i = 0; i < bp->num_tx_rings; i++)
4784                 bnx2_init_tx_ring(bp, i);
4785
4786         if (bp->num_tx_rings > 1)
4787                 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4788                        (TX_TSS_CID << 7));
4789
4790         for (i = 0; i < bp->num_rx_rings; i++)
4791                 bnx2_init_rx_ring(bp, i);
4792 }
4793
4794 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4795 {
4796         u32 max, num_rings = 1;
4797
4798         while (ring_size > MAX_RX_DESC_CNT) {
4799                 ring_size -= MAX_RX_DESC_CNT;
4800                 num_rings++;
4801         }
4802         /* round to next power of 2 */
4803         max = max_size;
4804         while ((max & num_rings) == 0)
4805                 max >>= 1;
4806
4807         if (num_rings != max)
4808                 max <<= 1;
4809
4810         return max;
4811 }
4812
4813 static void
4814 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4815 {
4816         u32 rx_size, rx_space, jumbo_size;
4817
4818         /* 8 for CRC and VLAN */
4819         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4820
4821         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4822                 sizeof(struct skb_shared_info);
4823
4824         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4825         bp->rx_pg_ring_size = 0;
4826         bp->rx_max_pg_ring = 0;
4827         bp->rx_max_pg_ring_idx = 0;
4828         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4829                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4830
4831                 jumbo_size = size * pages;
4832                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4833                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4834
4835                 bp->rx_pg_ring_size = jumbo_size;
4836                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4837                                                         MAX_RX_PG_RINGS);
4838                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4839                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4840                 bp->rx_copy_thresh = 0;
4841         }
4842
4843         bp->rx_buf_use_size = rx_size;
4844         /* hw alignment */
4845         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4846         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4847         bp->rx_ring_size = size;
4848         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4849         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4850 }
4851
4852 static void
4853 bnx2_free_tx_skbs(struct bnx2 *bp)
4854 {
4855         int i;
4856
4857         for (i = 0; i < bp->num_tx_rings; i++) {
4858                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4859                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4860                 int j;
4861
4862                 if (txr->tx_buf_ring == NULL)
4863                         continue;
4864
4865                 for (j = 0; j < TX_DESC_CNT; ) {
4866                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4867                         struct sk_buff *skb = tx_buf->skb;
4868                         int k, last;
4869
4870                         if (skb == NULL) {
4871                                 j++;
4872                                 continue;
4873                         }
4874
4875                         pci_unmap_single(bp->pdev,
4876                                          pci_unmap_addr(tx_buf, mapping),
4877                         skb_headlen(skb), PCI_DMA_TODEVICE);
4878
4879                         tx_buf->skb = NULL;
4880
4881                         last = skb_shinfo(skb)->nr_frags;
4882                         for (k = 0; k < last; k++) {
4883                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4884                                 pci_unmap_page(bp->pdev,
4885                                         pci_unmap_addr(tx_buf, mapping),
4886                                         skb_shinfo(skb)->frags[j].size,
4887                                         PCI_DMA_TODEVICE);
4888                         }
4889                         dev_kfree_skb(skb);
4890                         j += k + 1;
4891                 }
4892         }
4893 }
4894
4895 static void
4896 bnx2_free_rx_skbs(struct bnx2 *bp)
4897 {
4898         int i;
4899
4900         for (i = 0; i < bp->num_rx_rings; i++) {
4901                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4902                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4903                 int j;
4904
4905                 if (rxr->rx_buf_ring == NULL)
4906                         return;
4907
4908                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4909                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4910                         struct sk_buff *skb = rx_buf->skb;
4911
4912                         if (skb == NULL)
4913                                 continue;
4914
4915                         pci_unmap_single(bp->pdev,
4916                                          pci_unmap_addr(rx_buf, mapping),
4917                                          bp->rx_buf_use_size,
4918                                          PCI_DMA_FROMDEVICE);
4919
4920                         rx_buf->skb = NULL;
4921
4922                         dev_kfree_skb(skb);
4923                 }
4924                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4925                         bnx2_free_rx_page(bp, rxr, j);
4926         }
4927 }
4928
4929 static void
4930 bnx2_free_skbs(struct bnx2 *bp)
4931 {
4932         bnx2_free_tx_skbs(bp);
4933         bnx2_free_rx_skbs(bp);
4934 }
4935
4936 static int
4937 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4938 {
4939         int rc;
4940
4941         rc = bnx2_reset_chip(bp, reset_code);
4942         bnx2_free_skbs(bp);
4943         if (rc)
4944                 return rc;
4945
4946         if ((rc = bnx2_init_chip(bp)) != 0)
4947                 return rc;
4948
4949         bnx2_init_all_rings(bp);
4950         return 0;
4951 }
4952
/* Full NIC bring-up: chip reset followed by PHY initialization and link
 * setup under phy_lock.
 *
 * @reset_phy: non-zero requests a PHY reset inside bnx2_init_phy().
 *
 * Returns 0 on success or a negative errno from bnx2_reset_nic().
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
        int rc;

        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
                return rc;

        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp, reset_phy);
        bnx2_set_link(bp);
        /* With a remotely managed PHY, pick up any link event that was
         * posted while the chip was in reset.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                bnx2_remote_phy_event(bp);
        spin_unlock_bh(&bp->phy_lock);
        return 0;
}
4969
/* Ethtool register self-test.
 *
 * For each table entry: save the register, write 0 and verify the
 * read/write bits (rw_mask) read back as 0 while the read-only bits
 * (ro_mask) are preserved; then write all-ones and verify the rw bits
 * all stick, again with ro bits preserved.  The saved value is restored
 * whether or not the probe passes.
 *
 * Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709.
 * Returns 0 if every register behaves, -ENODEV on the first failure.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* Sentinel terminating the table. */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Probe with all-zeros: rw bits must clear, ro bits
                 * must keep their saved value.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Probe with all-ones: every rw bit must stick. */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the original value even on failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
5140
5141 static int
5142 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5143 {
5144         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5145                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5146         int i;
5147
5148         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5149                 u32 offset;
5150
5151                 for (offset = 0; offset < size; offset += 4) {
5152
5153                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5154
5155                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5156                                 test_pattern[i]) {
5157                                 return -ENODEV;
5158                         }
5159                 }
5160         }
5161         return 0;
5162 }
5163
5164 static int
5165 bnx2_test_memory(struct bnx2 *bp)
5166 {
5167         int ret = 0;
5168         int i;
5169         static struct mem_entry {
5170                 u32   offset;
5171                 u32   len;
5172         } mem_tbl_5706[] = {
5173                 { 0x60000,  0x4000 },
5174                 { 0xa0000,  0x3000 },
5175                 { 0xe0000,  0x4000 },
5176                 { 0x120000, 0x4000 },
5177                 { 0x1a0000, 0x4000 },
5178                 { 0x160000, 0x4000 },
5179                 { 0xffffffff, 0    },
5180         },
5181         mem_tbl_5709[] = {
5182                 { 0x60000,  0x4000 },
5183                 { 0xa0000,  0x3000 },
5184                 { 0xe0000,  0x4000 },
5185                 { 0x120000, 0x4000 },
5186                 { 0x1a0000, 0x4000 },
5187                 { 0xffffffff, 0    },
5188         };
5189         struct mem_entry *mem_tbl;
5190
5191         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5192                 mem_tbl = mem_tbl_5709;
5193         else
5194                 mem_tbl = mem_tbl_5706;
5195
5196         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5197                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5198                         mem_tbl[i].len)) != 0) {
5199                         return ret;
5200                 }
5201         }
5202
5203         return ret;
5204 }
5205
5206 #define BNX2_MAC_LOOPBACK       0
5207 #define BNX2_PHY_LOOPBACK       1
5208
5209 static int
5210 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5211 {
5212         unsigned int pkt_size, num_pkts, i;
5213         struct sk_buff *skb, *rx_skb;
5214         unsigned char *packet;
5215         u16 rx_start_idx, rx_idx;
5216         dma_addr_t map;
5217         struct tx_bd *txbd;
5218         struct sw_bd *rx_buf;
5219         struct l2_fhdr *rx_hdr;
5220         int ret = -ENODEV;
5221         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5222         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5223         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5224
5225         tx_napi = bnapi;
5226
5227         txr = &tx_napi->tx_ring;
5228         rxr = &bnapi->rx_ring;
5229         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5230                 bp->loopback = MAC_LOOPBACK;
5231                 bnx2_set_mac_loopback(bp);
5232         }
5233         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5234                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5235                         return 0;
5236
5237                 bp->loopback = PHY_LOOPBACK;
5238                 bnx2_set_phy_loopback(bp);
5239         }
5240         else
5241                 return -EINVAL;
5242
5243         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5244         skb = netdev_alloc_skb(bp->dev, pkt_size);
5245         if (!skb)
5246                 return -ENOMEM;
5247         packet = skb_put(skb, pkt_size);
5248         memcpy(packet, bp->dev->dev_addr, 6);
5249         memset(packet + 6, 0x0, 8);
5250         for (i = 14; i < pkt_size; i++)
5251                 packet[i] = (unsigned char) (i & 0xff);
5252
5253         map = pci_map_single(bp->pdev, skb->data, pkt_size,
5254                 PCI_DMA_TODEVICE);
5255
5256         REG_WR(bp, BNX2_HC_COMMAND,
5257                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5258
5259         REG_RD(bp, BNX2_HC_COMMAND);
5260
5261         udelay(5);
5262         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5263
5264         num_pkts = 0;
5265
5266         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5267
5268         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5269         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5270         txbd->tx_bd_mss_nbytes = pkt_size;
5271         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5272
5273         num_pkts++;
5274         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5275         txr->tx_prod_bseq += pkt_size;
5276
5277         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5278         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5279
5280         udelay(100);
5281
5282         REG_WR(bp, BNX2_HC_COMMAND,
5283                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5284
5285         REG_RD(bp, BNX2_HC_COMMAND);
5286
5287         udelay(5);
5288
5289         pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5290         dev_kfree_skb(skb);
5291
5292         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5293                 goto loopback_test_done;
5294
5295         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5296         if (rx_idx != rx_start_idx + num_pkts) {
5297                 goto loopback_test_done;
5298         }
5299
5300         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5301         rx_skb = rx_buf->skb;
5302
5303         rx_hdr = (struct l2_fhdr *) rx_skb->data;
5304         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5305
5306         pci_dma_sync_single_for_cpu(bp->pdev,
5307                 pci_unmap_addr(rx_buf, mapping),
5308                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5309
5310         if (rx_hdr->l2_fhdr_status &
5311                 (L2_FHDR_ERRORS_BAD_CRC |
5312                 L2_FHDR_ERRORS_PHY_DECODE |
5313                 L2_FHDR_ERRORS_ALIGNMENT |
5314                 L2_FHDR_ERRORS_TOO_SHORT |
5315                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5316
5317                 goto loopback_test_done;
5318         }
5319
5320         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5321                 goto loopback_test_done;
5322         }
5323
5324         for (i = 14; i < pkt_size; i++) {
5325                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5326                         goto loopback_test_done;
5327                 }
5328         }
5329
5330         ret = 0;
5331
5332 loopback_test_done:
5333         bp->loopback = 0;
5334         return ret;
5335 }
5336
5337 #define BNX2_MAC_LOOPBACK_FAILED        1
5338 #define BNX2_PHY_LOOPBACK_FAILED        2
5339 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5340                                          BNX2_PHY_LOOPBACK_FAILED)
5341
/* Ethtool loopback self-test: reset the NIC, re-init the PHY, then run
 * the MAC-level and PHY-level loopback tests in turn.
 *
 * Returns a bitmask of BNX2_MAC_LOOPBACK_FAILED / BNX2_PHY_LOOPBACK_FAILED
 * (0 on full success); both bits are set when the device is not running.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
        int rc = 0;

        if (!netif_running(bp->dev))
                return BNX2_LOOPBACK_FAILED;

        bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp, 1);
        spin_unlock_bh(&bp->phy_lock);
        if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
                rc |= BNX2_MAC_LOOPBACK_FAILED;
        if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
                rc |= BNX2_PHY_LOOPBACK_FAILED;
        return rc;
}
5360
5361 #define NVRAM_SIZE 0x200
5362 #define CRC32_RESIDUAL 0xdebb20e3
5363
5364 static int
5365 bnx2_test_nvram(struct bnx2 *bp)
5366 {
5367         __be32 buf[NVRAM_SIZE / 4];
5368         u8 *data = (u8 *) buf;
5369         int rc = 0;
5370         u32 magic, csum;
5371
5372         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5373                 goto test_nvram_done;
5374
5375         magic = be32_to_cpu(buf[0]);
5376         if (magic != 0x669955aa) {
5377                 rc = -ENODEV;
5378                 goto test_nvram_done;
5379         }
5380
5381         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5382                 goto test_nvram_done;
5383
5384         csum = ether_crc_le(0x100, data);
5385         if (csum != CRC32_RESIDUAL) {
5386                 rc = -ENODEV;
5387                 goto test_nvram_done;
5388         }
5389
5390         csum = ether_crc_le(0x100, data + 0x100);
5391         if (csum != CRC32_RESIDUAL) {
5392                 rc = -ENODEV;
5393         }
5394
5395 test_nvram_done:
5396         return rc;
5397 }
5398
/* Ethtool link self-test: report 0 when link is up, -ENODEV otherwise.
 *
 * With a remotely managed PHY the cached bp->link_up state is used;
 * otherwise BMSR is read directly under phy_lock.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
        u32 bmsr;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                if (bp->link_up)
                        return 0;
                return -ENODEV;
        }
        spin_lock_bh(&bp->phy_lock);
        bnx2_enable_bmsr1(bp);
        /* BMSR latches link-down: read twice so the second read reflects
         * the current link state.
         */
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (bmsr & BMSR_LSTATUS) {
                return 0;
        }
        return -ENODEV;
}
5421
5422 static int
5423 bnx2_test_intr(struct bnx2 *bp)
5424 {
5425         int i;
5426         u16 status_idx;
5427
5428         if (!netif_running(bp->dev))
5429                 return -ENODEV;
5430
5431         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5432
5433         /* This register is not touched during run-time. */
5434         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5435         REG_RD(bp, BNX2_HC_COMMAND);
5436
5437         for (i = 0; i < 10; i++) {
5438                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5439                         status_idx) {
5440
5441                         break;
5442                 }
5443
5444                 msleep_interruptible(10);
5445         }
5446         if (i < 10)
5447                 return 0;
5448
5449         return -ENODEV;
5450 }
5451
5452 /* Determining link for parallel detection. */
/* Determining link for parallel detection.
 *
 * Decides whether the 5706 SerDes should declare link by parallel
 * detection: signal must be detected, the AN debug register must show
 * neither loss-of-sync nor invalid RUDI, and the link partner must not
 * be sending CONFIG (i.e. not autonegotiating).
 *
 * Returns 1 when parallel-detected link is plausible, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Double read: this file consistently reads status registers
         * twice, presumably to clear latched state -- first value is
         * discarded.
         */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5483
/* Per-tick SerDes state machine for the 5706.
 *
 * While link is down with autoneg enabled, attempts parallel detection
 * (force 1G full duplex when the partner is not autonegotiating).  When
 * link came up via parallel detection and the partner later starts
 * autonegotiating, re-enables autoneg.  Also force-drops the link when
 * sync is lost.  Runs in timer context, hence spin_lock() (not _bh) on
 * phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* Still waiting out a previously started autoneg cycle. */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        if (bnx2_5706_serdes_has_link(bp)) {
                                /* Partner is not autonegotiating: force
                                 * 1G full duplex (parallel detection).
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Undocumented shadow access; bit 0x20 appears to flag
                 * that the partner is now autonegotiating -- TODO confirm
                 * against Broadcom PHY documentation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = bp->timer_interval;

        if (check_link) {
                u32 val;

                /* Double read of AN_DBG; first value is discarded (same
                 * latched-status pattern as elsewhere in this file).
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Link believed up but sync lost: force it down
                         * once, then let bnx2_set_link() re-evaluate.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
5545
/* Per-tick SerDes handling for the 5708.
 *
 * Only relevant for 2.5G-capable, locally managed PHYs.  While link is
 * down with autoneg enabled, alternates between forced 2.5G and autoneg
 * (with a two-tick wait) to find a link partner that does either.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg found nothing; try forced 2.5G with a
                         * shorter re-check interval.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G found nothing; go back to autoneg
                         * and give it two ticks before re-evaluating.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
5578
/* Periodic maintenance timer (self-rearming).
 *
 * Sends the firmware heartbeat, refreshes the firmware RX-drop counter,
 * works around a 5708 statistics erratum, and drives the SerDes state
 * machines.  Skipped (but still rearmed) while interrupts are disabled
 * via intr_sem.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        bnx2_send_heart_beat(bp);

        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5610
5611 static int
5612 bnx2_request_irq(struct bnx2 *bp)
5613 {
5614         struct net_device *dev = bp->dev;
5615         unsigned long flags;
5616         struct bnx2_irq *irq;
5617         int rc = 0, i;
5618
5619         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5620                 flags = 0;
5621         else
5622                 flags = IRQF_SHARED;
5623
5624         for (i = 0; i < bp->irq_nvecs; i++) {
5625                 irq = &bp->irq_tbl[i];
5626                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5627                                  dev);
5628                 if (rc)
5629                         break;
5630                 irq->requested = 1;
5631         }
5632         return rc;
5633 }
5634
5635 static void
5636 bnx2_free_irq(struct bnx2 *bp)
5637 {
5638         struct net_device *dev = bp->dev;
5639         struct bnx2_irq *irq;
5640         int i;
5641
5642         for (i = 0; i < bp->irq_nvecs; i++) {
5643                 irq = &bp->irq_tbl[i];
5644                 if (irq->requested)
5645                         free_irq(irq->vector, dev);
5646                 irq->requested = 0;
5647         }
5648         if (bp->flags & BNX2_FLAG_USING_MSI)
5649                 pci_disable_msi(bp->pdev);
5650         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5651                 pci_disable_msix(bp->pdev);
5652
5653         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5654 }
5655
/* Try to switch the device to MSI-X.
 *
 * Programs the chip's MSI-X table/PBA windows, fills the vector table
 * (vector 0 handles RX/general events, the rest are TX), and calls
 * pci_enable_msix().  On failure the function returns silently and the
 * caller falls back to MSI or INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;

                /* NOTE(review): all vectors share the bare device name;
                 * no per-vector suffix is added in this version.
                 */
                strcpy(bp->irq_tbl[i].name, bp->dev->name);
                if (i == 0)
                        bp->irq_tbl[i].handler = bnx2_msi_1shot;
                else
                        bp->irq_tbl[i].handler = bnx2_tx_msix;
        }

        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
                bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5687
/* Pick the interrupt mode: MSI-X if capable, else MSI, else INTx.
 *
 * @dis_msi: non-zero forces legacy INTx (module parameter / MSI test
 *           fallback).
 *
 * Always starts from an INTx default so a failed MSI-X/MSI attempt
 * leaves a valid configuration.  Ring counts are fixed at 1 here.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
                bnx2_enable_msix(bp);

        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        /* 5709 supports one-shot MSI; older chips use the
                         * regular MSI handler.
                         */
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }
        bp->num_tx_rings = 1;
        bp->num_rx_rings = 1;
}
5715
5716 /* Called with rtnl_lock */
5717 static int
5718 bnx2_open(struct net_device *dev)
5719 {
5720         struct bnx2 *bp = netdev_priv(dev);
5721         int rc;
5722
5723         netif_carrier_off(dev);
5724
5725         bnx2_set_power_state(bp, PCI_D0);
5726         bnx2_disable_int(bp);
5727
5728         bnx2_setup_int_mode(bp, disable_msi);
5729         bnx2_napi_enable(bp);
5730         rc = bnx2_alloc_mem(bp);
5731         if (rc) {
5732                 bnx2_napi_disable(bp);
5733                 bnx2_free_mem(bp);
5734                 return rc;
5735         }
5736
5737         rc = bnx2_request_irq(bp);
5738
5739         if (rc) {
5740                 bnx2_napi_disable(bp);
5741                 bnx2_free_mem(bp);
5742                 return rc;
5743         }
5744
5745         rc = bnx2_init_nic(bp, 1);
5746
5747         if (rc) {
5748                 bnx2_napi_disable(bp);
5749                 bnx2_free_irq(bp);
5750                 bnx2_free_skbs(bp);
5751                 bnx2_free_mem(bp);
5752                 return rc;
5753         }
5754
5755         mod_timer(&bp->timer, jiffies + bp->current_interval);
5756
5757         atomic_set(&bp->intr_sem, 0);
5758
5759         bnx2_enable_int(bp);
5760
5761         if (bp->flags & BNX2_FLAG_USING_MSI) {
5762                 /* Test MSI to make sure it is working
5763                  * If MSI test fails, go back to INTx mode
5764                  */
5765                 if (bnx2_test_intr(bp) != 0) {
5766                         printk(KERN_WARNING PFX "%s: No interrupt was generated"
5767                                " using MSI, switching to INTx mode. Please"
5768                                " report this failure to the PCI maintainer"
5769                                " and include system chipset information.\n",
5770                                bp->dev->name);
5771
5772                         bnx2_disable_int(bp);
5773                         bnx2_free_irq(bp);
5774
5775                         bnx2_setup_int_mode(bp, 1);
5776
5777                         rc = bnx2_init_nic(bp, 0);
5778
5779                         if (!rc)
5780                                 rc = bnx2_request_irq(bp);
5781
5782                         if (rc) {
5783                                 bnx2_napi_disable(bp);
5784                                 bnx2_free_skbs(bp);
5785                                 bnx2_free_mem(bp);
5786                                 del_timer_sync(&bp->timer);
5787                                 return rc;
5788                         }
5789                         bnx2_enable_int(bp);
5790                 }
5791         }
5792         if (bp->flags & BNX2_FLAG_USING_MSI)
5793                 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5794         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5795                 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5796
5797         netif_start_queue(dev);
5798
5799         return 0;
5800 }
5801
/* Workqueue handler scheduled by bnx2_tx_timeout(): stop traffic,
 * re-initialize the NIC, and restart.  intr_sem is set to 1 so the
 * interrupt path stays quiesced until bnx2_netif_start() re-enables it.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bnx2_netif_stop(bp);

        bnx2_init_nic(bp, 1);

        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
}
5817
/* net_device tx_timeout hook: defer the reset to process context via
 * the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
5826
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce the NIC so the RX path never observes a half-updated
         * vlgrp pointer.
         */
        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        /* NOTE(review): presumably reprograms RX filtering for the new
         * VLAN configuration — confirm against bnx2_set_rx_mode().
         */
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
#endif
5842
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

        /* One BD is needed for the linear part plus one per fragment.
         * The queue should have been stopped before the ring filled up;
         * reaching this branch means the stop/wake accounting is broken.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* Ask the chip to fill in the TCP/UDP checksum. */
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                /* VLAN tag rides in the upper 16 bits of the flags word. */
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size)) {
                /* TSO frame: encode the MSS and header-length hints the
                 * chip needs to segment the packet.
                 */
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* TCP header offset past the fixed IPv6 header, in
                         * 8-byte units, split across several BD bit fields
                         * (low 2 bits, bit 4, and bits 2-3 folded into mss).
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* IPv4 TSO: the IP/TCP headers are rewritten below,
                         * so a cloned header must be un-shared first.
                         */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Seed per-segment tot_len and a pseudo-header
                         * checksum for the chip to complete.
                         */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                /* Extra IP + TCP option words (4-byte units)
                                 * go into bits 8+ of the flags.
                                 */
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* NOTE(review): the DMA mapping result is not checked here; a
         * mapping failure would hand a bad address to the chip — confirm
         * whether pci_dma_mapping_error() checking is warranted.
         */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &txr->tx_desc_ring[ring_prod];

        /* First BD describes the linear part of the skb. */
        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One additional BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the final BD of the chain. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Publish the new producer index and byte sequence to the chip. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue once a worst-case skb no longer fits, then
         * re-check in case bnx2_tx_int() freed descriptors meanwhile.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
5984
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Make sure a pending bnx2_reset_task() cannot run concurrently
         * with the teardown below.
         */
        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        /* Select the firmware unload/suspend code based on the WoL
         * capability and configuration.
         */
        if (bp->flags & BNX2_FLAG_NO_WOL)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        /* Drop to low power; bp->wol determines wake capability. */
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6012
/* Fold the hi/lo halves of a 64-bit statistics-block counter into an
 * unsigned long.  The whole expansion is parenthesized so the macros
 * compose safely inside larger expressions (e.g. sums of counters), and
 * the high half is widened before shifting so the shift is well-defined
 * even where unsigned long is 32 bits.
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) (((unsigned long long) (ctr##_hi) << 32) +     \
        (ctr##_lo)))

/* 32-bit hosts can only report the low half. */
#define GET_NET_STATS32(ctr)            \
        ((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6025
/* netdev get_stats hook: fold the chip's hardware statistics block into
 * the cached net_device_stats structure and return it.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        /* Stats block not allocated yet: return whatever was cached. */
        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        /* Aggregate RX error count from the individual categories above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* NOTE(review): the carrier-sense counter is ignored on 5706 and
         * 5708 A0 — presumably a chip erratum; confirm.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Count both buffer-exhaustion drops and firmware drops as missed. */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
6101
6102 /* All ethtool functions called with rtnl_lock */
6103
/* ethtool get_settings: report supported media, advertising, and the
 * current link state.  Called with rtnl_lock held.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* With remote-PHY capability both media types are possible;
         * otherwise the fixed phy_port decides.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* phy_lock guards the link fields sampled below. */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* No link: report speed/duplex as unknown (-1). */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6162
/* ethtool set_settings: validate the requested port/speed/duplex or
 * autoneg advertising mask, then reprogram the PHY.  Called with
 * rtnl_lock held; all checks run under phy_lock.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching media is only possible with remote-PHY capability. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 speeds are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G requires the capable PHY and fibre media. */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the medium
                         * supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre supports only 1000/2500 full duplex. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                /* Gigabit speeds cannot be forced on copper. */
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit the new configuration. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6246
6247 static void
6248 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6249 {
6250         struct bnx2 *bp = netdev_priv(dev);
6251
6252         strcpy(info->driver, DRV_MODULE_NAME);
6253         strcpy(info->version, DRV_MODULE_VERSION);
6254         strcpy(info->bus_info, pci_name(bp->pdev));
6255         strcpy(info->fw_version, bp->fw_version);
6256 }
6257
/* Size of the register dump produced by bnx2_get_regs() (ethtool -d). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6265
6266 static void
6267 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6268 {
6269         u32 *p = _p, i, offset;
6270         u8 *orig_p = _p;
6271         struct bnx2 *bp = netdev_priv(dev);
6272         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6273                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6274                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6275                                  0x1040, 0x1048, 0x1080, 0x10a4,
6276                                  0x1400, 0x1490, 0x1498, 0x14f0,
6277                                  0x1500, 0x155c, 0x1580, 0x15dc,
6278                                  0x1600, 0x1658, 0x1680, 0x16d8,
6279                                  0x1800, 0x1820, 0x1840, 0x1854,
6280                                  0x1880, 0x1894, 0x1900, 0x1984,
6281                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6282                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6283                                  0x2000, 0x2030, 0x23c0, 0x2400,
6284                                  0x2800, 0x2820, 0x2830, 0x2850,
6285                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6286                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6287                                  0x4080, 0x4090, 0x43c0, 0x4458,
6288                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6289                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6290                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6291                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6292                                  0x6800, 0x6848, 0x684c, 0x6860,
6293                                  0x6888, 0x6910, 0x8000 };
6294
6295         regs->version = 0;
6296
6297         memset(p, 0, BNX2_REGDUMP_LEN);
6298
6299         if (!netif_running(bp->dev))
6300                 return;
6301
6302         i = 0;
6303         offset = reg_boundaries[0];
6304         p += offset;
6305         while (offset < BNX2_REGDUMP_LEN) {
6306                 *p++ = REG_RD(bp, offset);
6307                 offset += 4;
6308                 if (offset == reg_boundaries[i + 1]) {
6309                         offset = reg_boundaries[i + 2];
6310                         p = (u32 *) (orig_p + offset);
6311                         i += 2;
6312                 }
6313         }
6314 }
6315
6316 static void
6317 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6318 {
6319         struct bnx2 *bp = netdev_priv(dev);
6320
6321         if (bp->flags & BNX2_FLAG_NO_WOL) {
6322                 wol->supported = 0;
6323                 wol->wolopts = 0;
6324         }
6325         else {
6326                 wol->supported = WAKE_MAGIC;
6327                 if (bp->wol)
6328                         wol->wolopts = WAKE_MAGIC;
6329                 else
6330                         wol->wolopts = 0;
6331         }
6332         memset(&wol->sopass, 0, sizeof(wol->sopass));
6333 }
6334
6335 static int
6336 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6337 {
6338         struct bnx2 *bp = netdev_priv(dev);
6339
6340         if (wol->wolopts & ~WAKE_MAGIC)
6341                 return -EINVAL;
6342
6343         if (wol->wolopts & WAKE_MAGIC) {
6344                 if (bp->flags & BNX2_FLAG_NO_WOL)
6345                         return -EINVAL;
6346
6347                 bp->wol = 1;
6348         }
6349         else {
6350                 bp->wol = 0;
6351         }
6352         return 0;
6353 }
6354
/* ethtool nway_reset: restart autonegotiation on the PHY. */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        /* Only meaningful when autonegotiation is enabled. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                /* Remote PHY: delegate the renegotiation to the bootcode. */
                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep cannot run under
                 * a BH-disabled spinlock.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Flag the driver timer to follow up on SerDes autoneg
                 * (serdes_an_pending is consumed by the timer path).
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and restart autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6397
6398 static int
6399 bnx2_get_eeprom_len(struct net_device *dev)
6400 {
6401         struct bnx2 *bp = netdev_priv(dev);
6402
6403         if (bp->flash_info == NULL)
6404                 return 0;
6405
6406         return (int) bp->flash_size;
6407 }
6408
6409 static int
6410 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6411                 u8 *eebuf)
6412 {
6413         struct bnx2 *bp = netdev_priv(dev);
6414         int rc;
6415
6416         /* parameters already validated in ethtool_get_eeprom */
6417
6418         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6419
6420         return rc;
6421 }
6422
6423 static int
6424 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6425                 u8 *eebuf)
6426 {
6427         struct bnx2 *bp = netdev_priv(dev);
6428         int rc;
6429
6430         /* parameters already validated in ethtool_set_eeprom */
6431
6432         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6433
6434         return rc;
6435 }
6436
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters (tick = microseconds, trip = frame count).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(coal, 0, sizeof(struct ethtool_coalesce));

        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
6458
/* ethtool set_coalesce: clamp and store the requested coalescing
 * parameters (0x3ff max for tick values, 0xff max for frame counts),
 * then restart the NIC so they take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
        if (bp->rx_quick_cons_trip_int > 0xff)
                bp->rx_quick_cons_trip_int = 0xff;

        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
                0xff;

        /* On 5708 only "off" (0) or a one-second stats interval is
         * accepted; anything else is coerced to one second.
         */
        bp->stats_ticks = coal->stats_block_coalesce_usecs;
        if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
                        bp->stats_ticks = USEC_PER_SEC;
        }
        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

        /* Re-initialize the NIC so the new values are programmed. */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_init_nic(bp, 0);
                bnx2_netif_start(bp);
        }

        return 0;
}
6507
/* ethtool get_ringparam: report current and maximum ring sizes.  The
 * "jumbo" fields map to the RX page ring used for large packets.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct bnx2 *bp = netdev_priv(dev);

        ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
        ering->rx_mini_max_pending = 0;
        ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

        ering->rx_pending = bp->rx_ring_size;
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = bp->rx_pg_ring_size;

        ering->tx_max_pending = MAX_TX_DESC_CNT;
        ering->tx_pending = bp->tx_ring_size;
}
6524
/* Resize the RX/TX rings.  If the interface is running it is torn down,
 * the new sizes are recorded, and the NIC is re-initialized.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                /* NOTE(review): on allocation failure this returns with
                 * the device still marked running but fully torn down —
                 * confirm whether the interface should be closed here.
                 */
                if (rc)
                        return rc;
                bnx2_init_nic(bp, 0);
                bnx2_netif_start(bp);
        }
        return 0;
}
6549
6550 static int
6551 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6552 {
6553         struct bnx2 *bp = netdev_priv(dev);
6554         int rc;
6555
6556         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6557                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6558                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6559
6560                 return -EINVAL;
6561         }
6562         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6563         return rc;
6564 }
6565
6566 static void
6567 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6568 {
6569         struct bnx2 *bp = netdev_priv(dev);
6570
6571         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6572         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6573         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6574 }
6575
6576 static int
6577 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6578 {
6579         struct bnx2 *bp = netdev_priv(dev);
6580
6581         bp->req_flow_ctrl = 0;
6582         if (epause->rx_pause)
6583                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6584         if (epause->tx_pause)
6585                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6586
6587         if (epause->autoneg) {
6588                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6589         }
6590         else {
6591                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6592         }
6593
6594         spin_lock_bh(&bp->phy_lock);
6595
6596         bnx2_setup_phy(bp, bp->phy_port);
6597
6598         spin_unlock_bh(&bp->phy_lock);
6599
6600         return 0;
6601 }
6602
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->rx_csum;
}
6610
/* ethtool set_rx_csum: record the RX checksum offload setting; it is
 * consulted by the RX path rather than programmed into the chip here.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_csum = data;
        return 0;
}
6619
6620 static int
6621 bnx2_set_tso(struct net_device *dev, u32 data)
6622 {
6623         struct bnx2 *bp = netdev_priv(dev);
6624
6625         if (data) {
6626                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6627                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6628                         dev->features |= NETIF_F_TSO6;
6629         } else
6630                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6631                                    NETIF_F_TSO_ECN);
6632         return 0;
6633 }
6634
#define BNX2_NUM_STATS 46

/* ethtool statistics names.  Entry order must match the parallel
 * bnx2_stats_offset_arr[] table below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
6687
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offset, in 32-bit words, of each counter within the hardware
 * statistics block; index-matched to bnx2_stats_str_arr.  For 8-byte
 * counters the offset names the _hi word and the _lo word is read from
 * the next 32-bit slot (see bnx2_get_ethtool_stats()).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6738
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Per-counter width in bytes (0 = counter skipped), index-matched to
 * bnx2_stats_str_arr.  Selected for 5706 A0/A1/A2 and 5708 A0 in
 * bnx2_get_ethtool_stats().
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6749
/* Per-counter width in bytes for all later chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6757
#define BNX2_NUM_TESTS 6

/* Names of the ethtool self-test results, in the order bnx2_self_test()
 * fills its result buffer.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6770
6771 static int
6772 bnx2_get_sset_count(struct net_device *dev, int sset)
6773 {
6774         switch (sset) {
6775         case ETH_SS_TEST:
6776                 return BNX2_NUM_TESTS;
6777         case ETH_SS_STATS:
6778                 return BNX2_NUM_STATS;
6779         default:
6780                 return -EOPNOTSUPP;
6781         }
6782 }
6783
/* ethtool self-test handler.  Offline tests (register, memory,
 * loopback) reset the chip and therefore require the device to be
 * quiesced first; online tests (nvram, interrupt, link) run against
 * the live device.  Each buf[] slot is non-zero on failure.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and free all rx/tx buffers before
		 * the destructive offline tests.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback test returns a bitmask of failed loopback modes */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* NOTE(review): bnx2_init_nic()'s return value is
			 * ignored here; if re-initialization fails the
			 * interface is restarted anyway.  Verify whether an
			 * error should be propagated instead.
			 */
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6839
6840 static void
6841 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6842 {
6843         switch (stringset) {
6844         case ETH_SS_STATS:
6845                 memcpy(buf, bnx2_stats_str_arr,
6846                         sizeof(bnx2_stats_str_arr));
6847                 break;
6848         case ETH_SS_TEST:
6849                 memcpy(buf, bnx2_tests_str_arr,
6850                         sizeof(bnx2_tests_str_arr));
6851                 break;
6852         }
6853 }
6854
6855 static void
6856 bnx2_get_ethtool_stats(struct net_device *dev,
6857                 struct ethtool_stats *stats, u64 *buf)
6858 {
6859         struct bnx2 *bp = netdev_priv(dev);
6860         int i;
6861         u32 *hw_stats = (u32 *) bp->stats_blk;
6862         u8 *stats_len_arr = NULL;
6863
6864         if (hw_stats == NULL) {
6865                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6866                 return;
6867         }
6868
6869         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6870             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6871             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6872             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6873                 stats_len_arr = bnx2_5706_stats_len_arr;
6874         else
6875                 stats_len_arr = bnx2_5708_stats_len_arr;
6876
6877         for (i = 0; i < BNX2_NUM_STATS; i++) {
6878                 if (stats_len_arr[i] == 0) {
6879                         /* skip this counter */
6880                         buf[i] = 0;
6881                         continue;
6882                 }
6883                 if (stats_len_arr[i] == 4) {
6884                         /* 4-byte counter */
6885                         buf[i] = (u64)
6886                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6887                         continue;
6888                 }
6889                 /* 8-byte counter */
6890                 buf[i] = (((u64) *(hw_stats +
6891                                         bnx2_stats_offset_arr[i])) << 32) +
6892                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6893         }
6894 }
6895
/* ethtool LED identify (ethtool -p): blink the port LED 'data' times
 * (default 2) by toggling the MAC LED override every 500ms, then
 * restore the original LED configuration.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Save LED config and take manual control of the LEDs. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two register writes per blink: override-only, then override
	 * with every LED forced on.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop blinking early if the user interrupted us. */
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
6929
6930 static int
6931 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6932 {
6933         struct bnx2 *bp = netdev_priv(dev);
6934
6935         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6936                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6937         else
6938                 return (ethtool_op_set_tx_csum(dev, data));
6939 }
6940
/* ethtool entry points; operations not listed here fall back to the
 * ethtool core defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6971
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock held.  Direct PHY register access is refused
 * when the remote-PHY capability is active, and requires the interface
 * to be up.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with the rest of the
		 * driver.
		 */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7025
7026 /* Called with rtnl_lock */
7027 static int
7028 bnx2_change_mac_addr(struct net_device *dev, void *p)
7029 {
7030         struct sockaddr *addr = p;
7031         struct bnx2 *bp = netdev_priv(dev);
7032
7033         if (!is_valid_ether_addr(addr->sa_data))
7034                 return -EINVAL;
7035
7036         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7037         if (netif_running(dev))
7038                 bnx2_set_mac_addr(bp);
7039
7040         return 0;
7041 }
7042
7043 /* Called with rtnl_lock */
7044 static int
7045 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7046 {
7047         struct bnx2 *bp = netdev_priv(dev);
7048
7049         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7050                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7051                 return -EINVAL;
7052
7053         dev->mtu = new_mtu;
7054         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7055 }
7056
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler by hand with the device
 * IRQ masked, so the stack can poll the device when normal interrupt
 * delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
7068
/* Determine the 5709 port media type from the dual-media control
 * register and set BNX2_PHY_FLAG_SERDES for fiber (SERDES) ports.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id identifies single-media parts directly: "C" parts are
	 * not SERDES, "S" parts always are.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Dual-media part: read the media strap, honoring a software
	 * override of the hardware strapping when present.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-value to media mapping differs between PCI function
	 * 0 and function 1.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7106
/* Probe the PCI/PCI-X bus mode, width and clock speed from the chip's
 * status registers and record them in bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: decode the detected clock speed. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: 66MHz if M66EN is asserted, else 33. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7158
7159 static int __devinit
7160 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7161 {
7162         struct bnx2 *bp;
7163         unsigned long mem_len;
7164         int rc, i, j;
7165         u32 reg;
7166         u64 dma_mask, persist_dma_mask;
7167
7168         SET_NETDEV_DEV(dev, &pdev->dev);
7169         bp = netdev_priv(dev);
7170
7171         bp->flags = 0;
7172         bp->phy_flags = 0;
7173
7174         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7175         rc = pci_enable_device(pdev);
7176         if (rc) {
7177                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7178                 goto err_out;
7179         }
7180
7181         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7182                 dev_err(&pdev->dev,
7183                         "Cannot find PCI device base address, aborting.\n");
7184                 rc = -ENODEV;
7185                 goto err_out_disable;
7186         }
7187
7188         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7189         if (rc) {
7190                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7191                 goto err_out_disable;
7192         }
7193
7194         pci_set_master(pdev);
7195         pci_save_state(pdev);
7196
7197         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7198         if (bp->pm_cap == 0) {
7199                 dev_err(&pdev->dev,
7200                         "Cannot find power management capability, aborting.\n");
7201                 rc = -EIO;
7202                 goto err_out_release;
7203         }
7204
7205         bp->dev = dev;
7206         bp->pdev = pdev;
7207
7208         spin_lock_init(&bp->phy_lock);
7209         spin_lock_init(&bp->indirect_lock);
7210         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7211
7212         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7213         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7214         dev->mem_end = dev->mem_start + mem_len;
7215         dev->irq = pdev->irq;
7216
7217         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7218
7219         if (!bp->regview) {
7220                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7221                 rc = -ENOMEM;
7222                 goto err_out_release;
7223         }
7224
7225         /* Configure byte swap and enable write to the reg_window registers.
7226          * Rely on CPU to do target byte swapping on big endian systems
7227          * The chip's target access swapping will not swap all accesses
7228          */
7229         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7230                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7231                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7232
7233         bnx2_set_power_state(bp, PCI_D0);
7234
7235         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7236
7237         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7238                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7239                         dev_err(&pdev->dev,
7240                                 "Cannot find PCIE capability, aborting.\n");
7241                         rc = -EIO;
7242                         goto err_out_unmap;
7243                 }
7244                 bp->flags |= BNX2_FLAG_PCIE;
7245                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7246                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7247         } else {
7248                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7249                 if (bp->pcix_cap == 0) {
7250                         dev_err(&pdev->dev,
7251                                 "Cannot find PCIX capability, aborting.\n");
7252                         rc = -EIO;
7253                         goto err_out_unmap;
7254                 }
7255         }
7256
7257         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7258                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7259                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7260         }
7261
7262         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7263                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7264                         bp->flags |= BNX2_FLAG_MSI_CAP;
7265         }
7266
7267         /* 5708 cannot support DMA addresses > 40-bit.  */
7268         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7269                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7270         else
7271                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7272
7273         /* Configure DMA attributes. */
7274         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7275                 dev->features |= NETIF_F_HIGHDMA;
7276                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7277                 if (rc) {
7278                         dev_err(&pdev->dev,
7279                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7280                         goto err_out_unmap;
7281                 }
7282         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7283                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7284                 goto err_out_unmap;
7285         }
7286
7287         if (!(bp->flags & BNX2_FLAG_PCIE))
7288                 bnx2_get_pci_speed(bp);
7289
7290         /* 5706A0 may falsely detect SERR and PERR. */
7291         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7292                 reg = REG_RD(bp, PCI_COMMAND);
7293                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7294                 REG_WR(bp, PCI_COMMAND, reg);
7295         }
7296         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7297                 !(bp->flags & BNX2_FLAG_PCIX)) {
7298
7299                 dev_err(&pdev->dev,
7300                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7301                 goto err_out_unmap;
7302         }
7303
7304         bnx2_init_nvram(bp);
7305
7306         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7307
7308         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7309             BNX2_SHM_HDR_SIGNATURE_SIG) {
7310                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7311
7312                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7313         } else
7314                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7315
7316         /* Get the permanent MAC address.  First we need to make sure the
7317          * firmware is actually running.
7318          */
7319         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7320
7321         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7322             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7323                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7324                 rc = -ENODEV;
7325                 goto err_out_unmap;
7326         }
7327
7328         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7329         for (i = 0, j = 0; i < 3; i++) {
7330                 u8 num, k, skip0;
7331
7332                 num = (u8) (reg >> (24 - (i * 8)));
7333                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7334                         if (num >= k || !skip0 || k == 1) {
7335                                 bp->fw_version[j++] = (num / k) + '0';
7336                                 skip0 = 0;
7337                         }
7338                 }
7339                 if (i != 2)
7340                         bp->fw_version[j++] = '.';
7341         }
7342         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7343         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7344                 bp->wol = 1;
7345
7346         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7347                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7348
7349                 for (i = 0; i < 30; i++) {
7350                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7351                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7352                                 break;
7353                         msleep(10);
7354                 }
7355         }
7356         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7357         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7358         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7359             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7360                 int i;
7361                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7362
7363                 bp->fw_version[j++] = ' ';
7364                 for (i = 0; i < 3; i++) {
7365                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7366                         reg = swab32(reg);
7367                         memcpy(&bp->fw_version[j], &reg, 4);
7368                         j += 4;
7369                 }
7370         }
7371
7372         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7373         bp->mac_addr[0] = (u8) (reg >> 8);
7374         bp->mac_addr[1] = (u8) reg;
7375
7376         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7377         bp->mac_addr[2] = (u8) (reg >> 24);
7378         bp->mac_addr[3] = (u8) (reg >> 16);
7379         bp->mac_addr[4] = (u8) (reg >> 8);
7380         bp->mac_addr[5] = (u8) reg;
7381
7382         bp->tx_ring_size = MAX_TX_DESC_CNT;
7383         bnx2_set_rx_ring_size(bp, 255);
7384
7385         bp->rx_csum = 1;
7386
7387         bp->tx_quick_cons_trip_int = 20;
7388         bp->tx_quick_cons_trip = 20;
7389         bp->tx_ticks_int = 80;
7390         bp->tx_ticks = 80;
7391
7392         bp->rx_quick_cons_trip_int = 6;
7393         bp->rx_quick_cons_trip = 6;
7394         bp->rx_ticks_int = 18;
7395         bp->rx_ticks = 18;
7396
7397         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7398
7399         bp->timer_interval =  HZ;
7400         bp->current_interval =  HZ;
7401
7402         bp->phy_addr = 1;
7403
7404         /* Disable WOL support if we are running on a SERDES chip. */
7405         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7406                 bnx2_get_5709_media(bp);
7407         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7408                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7409
7410         bp->phy_port = PORT_TP;
7411         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7412                 bp->phy_port = PORT_FIBRE;
7413                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7414                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7415                         bp->flags |= BNX2_FLAG_NO_WOL;
7416                         bp->wol = 0;
7417                 }
7418                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7419                         /* Don't do parallel detect on this board because of
7420                          * some board problems.  The link will not go down
7421                          * if we do parallel detect.
7422                          */
7423                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7424                             pdev->subsystem_device == 0x310c)
7425                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7426                 } else {
7427                         bp->phy_addr = 2;
7428                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7429                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7430                 }
7431                 bnx2_init_remote_phy(bp);
7432
7433         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7434                    CHIP_NUM(bp) == CHIP_NUM_5708)
7435                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7436         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7437                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7438                   CHIP_REV(bp) == CHIP_REV_Bx))
7439                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7440
7441         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7442             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7443             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7444                 bp->flags |= BNX2_FLAG_NO_WOL;
7445                 bp->wol = 0;
7446         }
7447
7448         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7449                 bp->tx_quick_cons_trip_int =
7450                         bp->tx_quick_cons_trip;
7451                 bp->tx_ticks_int = bp->tx_ticks;
7452                 bp->rx_quick_cons_trip_int =
7453                         bp->rx_quick_cons_trip;
7454                 bp->rx_ticks_int = bp->rx_ticks;
7455                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7456                 bp->com_ticks_int = bp->com_ticks;
7457                 bp->cmd_ticks_int = bp->cmd_ticks;
7458         }
7459
7460         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7461          *
7462          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7463          * with byte enables disabled on the unused 32-bit word.  This is legal
7464          * but causes problems on the AMD 8132 which will eventually stop
7465          * responding after a while.
7466          *
7467          * AMD believes this incompatibility is unique to the 5706, and
7468          * prefers to locally disable MSI rather than globally disabling it.
7469          */
7470         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7471                 struct pci_dev *amd_8132 = NULL;
7472
7473                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7474                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7475                                                   amd_8132))) {
7476
7477                         if (amd_8132->revision >= 0x10 &&
7478                             amd_8132->revision <= 0x13) {
7479                                 disable_msi = 1;
7480                                 pci_dev_put(amd_8132);
7481                                 break;
7482                         }
7483                 }
7484         }
7485
7486         bnx2_set_default_link(bp);
7487         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7488
7489         init_timer(&bp->timer);
7490         bp->timer.expires = RUN_AT(bp->timer_interval);
7491         bp->timer.data = (unsigned long) bp;
7492         bp->timer.function = bnx2_timer;
7493
7494         return 0;
7495
7496 err_out_unmap:
7497         if (bp->regview) {
7498                 iounmap(bp->regview);
7499                 bp->regview = NULL;
7500         }
7501
7502 err_out_release:
7503         pci_release_regions(pdev);
7504
7505 err_out_disable:
7506         pci_disable_device(pdev);
7507         pci_set_drvdata(pdev, NULL);
7508
7509 err_out:
7510         return rc;
7511 }
7512
7513 static char * __devinit
7514 bnx2_bus_string(struct bnx2 *bp, char *str)
7515 {
7516         char *s = str;
7517
7518         if (bp->flags & BNX2_FLAG_PCIE) {
7519                 s += sprintf(s, "PCI Express");
7520         } else {
7521                 s += sprintf(s, "PCI");
7522                 if (bp->flags & BNX2_FLAG_PCIX)
7523                         s += sprintf(s, "-X");
7524                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7525                         s += sprintf(s, " 32-bit");
7526                 else
7527                         s += sprintf(s, " 64-bit");
7528                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7529         }
7530         return str;
7531 }
7532
7533 static void __devinit
7534 bnx2_init_napi(struct bnx2 *bp)
7535 {
7536         int i;
7537
7538         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7539                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7540                 int (*poll)(struct napi_struct *, int);
7541
7542                 if (i == 0)
7543                         poll = bnx2_poll;
7544                 else
7545                         poll = bnx2_tx_poll;
7546
7547                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7548                 bnapi->bp = bp;
7549         }
7550 }
7551
7552 static int __devinit
7553 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7554 {
7555         static int version_printed = 0;
7556         struct net_device *dev = NULL;
7557         struct bnx2 *bp;
7558         int rc;
7559         char str[40];
7560         DECLARE_MAC_BUF(mac);
7561
7562         if (version_printed++ == 0)
7563                 printk(KERN_INFO "%s", version);
7564
7565         /* dev zeroed in init_etherdev */
7566         dev = alloc_etherdev(sizeof(*bp));
7567
7568         if (!dev)
7569                 return -ENOMEM;
7570
7571         rc = bnx2_init_board(pdev, dev);
7572         if (rc < 0) {
7573                 free_netdev(dev);
7574                 return rc;
7575         }
7576
7577         dev->open = bnx2_open;
7578         dev->hard_start_xmit = bnx2_start_xmit;
7579         dev->stop = bnx2_close;
7580         dev->get_stats = bnx2_get_stats;
7581         dev->set_multicast_list = bnx2_set_rx_mode;
7582         dev->do_ioctl = bnx2_ioctl;
7583         dev->set_mac_address = bnx2_change_mac_addr;
7584         dev->change_mtu = bnx2_change_mtu;
7585         dev->tx_timeout = bnx2_tx_timeout;
7586         dev->watchdog_timeo = TX_TIMEOUT;
7587 #ifdef BCM_VLAN
7588         dev->vlan_rx_register = bnx2_vlan_rx_register;
7589 #endif
7590         dev->ethtool_ops = &bnx2_ethtool_ops;
7591
7592         bp = netdev_priv(dev);
7593         bnx2_init_napi(bp);
7594
7595 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7596         dev->poll_controller = poll_bnx2;
7597 #endif
7598
7599         pci_set_drvdata(pdev, dev);
7600
7601         memcpy(dev->dev_addr, bp->mac_addr, 6);
7602         memcpy(dev->perm_addr, bp->mac_addr, 6);
7603         bp->name = board_info[ent->driver_data].name;
7604
7605         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7606         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7607                 dev->features |= NETIF_F_IPV6_CSUM;
7608
7609 #ifdef BCM_VLAN
7610         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7611 #endif
7612         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7613         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7614                 dev->features |= NETIF_F_TSO6;
7615
7616         if ((rc = register_netdev(dev))) {
7617                 dev_err(&pdev->dev, "Cannot register net device\n");
7618                 if (bp->regview)
7619                         iounmap(bp->regview);
7620                 pci_release_regions(pdev);
7621                 pci_disable_device(pdev);
7622                 pci_set_drvdata(pdev, NULL);
7623                 free_netdev(dev);
7624                 return rc;
7625         }
7626
7627         printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7628                 "IRQ %d, node addr %s\n",
7629                 dev->name,
7630                 bp->name,
7631                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7632                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7633                 bnx2_bus_string(bp, str),
7634                 dev->base_addr,
7635                 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7636
7637         return 0;
7638 }
7639
/* PCI remove callback: tear down the device in the reverse order of
 * bnx2_init_one().  Statement order is significant: queued work is
 * flushed before the netdev is unregistered, and the register window
 * is unmapped only after unregistration.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure any work this driver queued has finished before
	 * the net_device goes away.
	 */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7658
/* PCI suspend callback: save PCI state, and if the interface is up,
 * quiesce traffic, reset the chip with the appropriate Wake-on-LAN
 * firmware message, and drop to the requested low-power state.
 * Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop pending work, traffic and the periodic timer before
	 * resetting the chip.
	 */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset code based on WoL capability/config. */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7689
7690 static int
7691 bnx2_resume(struct pci_dev *pdev)
7692 {
7693         struct net_device *dev = pci_get_drvdata(pdev);
7694         struct bnx2 *bp = netdev_priv(dev);
7695
7696         pci_restore_state(pdev);
7697         if (!netif_running(dev))
7698                 return 0;
7699
7700         bnx2_set_power_state(bp, PCI_D0);
7701         netif_device_attach(dev);
7702         bnx2_init_nic(bp, 1);
7703         bnx2_netif_start(bp);
7704         return 0;
7705 }
7706
7707 /**
7708  * bnx2_io_error_detected - called when PCI error is detected
7709  * @pdev: Pointer to PCI device
7710  * @state: The current pci connection state
7711  *
7712  * This function is called after a PCI bus error affecting
7713  * this device has been detected.
7714  */
7715 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7716                                                pci_channel_state_t state)
7717 {
7718         struct net_device *dev = pci_get_drvdata(pdev);
7719         struct bnx2 *bp = netdev_priv(dev);
7720
7721         rtnl_lock();
7722         netif_device_detach(dev);
7723
7724         if (netif_running(dev)) {
7725                 bnx2_netif_stop(bp);
7726                 del_timer_sync(&bp->timer);
7727                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7728         }
7729
7730         pci_disable_device(pdev);
7731         rtnl_unlock();
7732
7733         /* Request a slot slot reset. */
7734         return PCI_ERS_RESULT_NEED_RESET;
7735 }
7736
7737 /**
7738  * bnx2_io_slot_reset - called after the pci bus has been reset.
7739  * @pdev: Pointer to PCI device
7740  *
7741  * Restart the card from scratch, as if from a cold-boot.
7742  */
7743 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7744 {
7745         struct net_device *dev = pci_get_drvdata(pdev);
7746         struct bnx2 *bp = netdev_priv(dev);
7747
7748         rtnl_lock();
7749         if (pci_enable_device(pdev)) {
7750                 dev_err(&pdev->dev,
7751                         "Cannot re-enable PCI device after reset.\n");
7752                 rtnl_unlock();
7753                 return PCI_ERS_RESULT_DISCONNECT;
7754         }
7755         pci_set_master(pdev);
7756         pci_restore_state(pdev);
7757
7758         if (netif_running(dev)) {
7759                 bnx2_set_power_state(bp, PCI_D0);
7760                 bnx2_init_nic(bp, 1);
7761         }
7762
7763         rtnl_unlock();
7764         return PCI_ERS_RESULT_RECOVERED;
7765 }
7766
7767 /**
7768  * bnx2_io_resume - called when traffic can start flowing again.
7769  * @pdev: Pointer to PCI device
7770  *
7771  * This callback is called when the error recovery driver tells us that
7772  * its OK to resume normal operation.
7773  */
7774 static void bnx2_io_resume(struct pci_dev *pdev)
7775 {
7776         struct net_device *dev = pci_get_drvdata(pdev);
7777         struct bnx2 *bp = netdev_priv(dev);
7778
7779         rtnl_lock();
7780         if (netif_running(dev))
7781                 bnx2_netif_start(bp);
7782
7783         netif_device_attach(dev);
7784         rtnl_unlock();
7785 }
7786
/* PCI error-recovery hooks used by the PCI core (EEH/AER). */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
7792
/* PCI driver glue: probe/remove, power management, and error recovery. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
7802
7803 static int __init bnx2_init(void)
7804 {
7805         return pci_register_driver(&bnx2_pci_driver);
7806 }
7807
/* Module exit point: unregister the driver from the PCI core. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7812
/* Wire the module entry/exit points into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7815
7816
7817