/* drivers/net/bnx2.c — from the linux-2.6 tree.
 * Patch context: "[BNX2]: Remove REG_WR_IND/REG_RD_IND macros."
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Size of the scratch buffer used when loading firmware images
 * (presumably for zlib decompression — see the firmware load path). */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.7.2"
#define DRV_MODULE_RELDATE      "January 21, 2008"

/* Schedule relative to now, in jiffies. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board type index.  Values are stored in the driver_data field of
 * bnx2_pci_tbl[] and index the board_info[] name table below, so the
 * two must stay in sync. */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* indexed by board_t, above */
static const struct {
        char *name;     /* marketing name printed at probe time */
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI devices claimed by this driver.  HP OEM subsystem IDs must be
 * listed before the wildcard (PCI_ANY_ID) entries for the same device
 * so they match first; the last field is the board_t driver_data. */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* Supported NVRAM (flash/EEPROM) parts.  Each entry is a struct
 * flash_spec (see bnx2.h); entries are matched against the NVRAM
 * strapping read from the chip at init time.  The "Expansion entry"
 * placeholders reserve strapping codes for parts not yet qualified. */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM configuration, so it bypasses the
 * strapping-based flash_table[] lookup and uses this spec directly. */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bnapi->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
259 static void
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261 {
262         spin_lock_bh(&bp->indirect_lock);
263         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265         spin_unlock_bh(&bp->indirect_lock);
266 }
267
/* Write a 32-bit value into the firmware shared-memory region
 * (offset is relative to bp->shmem_base). */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
        bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
273
274 static u32
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
276 {
277         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
278 }
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
/* Read a PHY register over the MDIO interface.
 *
 * Returns 0 with the register value in *val on success, or -EBUSY if
 * the MDIO transaction never completes (then *val is set to 0).
 *
 * If hardware auto-polling of the PHY is active, it is turned off for
 * the duration of the manual access and restored afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Disable auto-polling so it cannot contend with our
                 * manual MDIO transaction. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush write */

                udelay(40);
        }

        /* Kick off the read: PHY address, register, READ command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to ~500us for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to fetch the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush write */

                udelay(40);
        }

        return ret;
}
361
/* Write a PHY register over the MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the MDIO transaction never
 * completes.  Mirrors bnx2_read_phy(): auto-polling, if enabled, is
 * suspended around the manual access and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Disable auto-polling so it cannot contend with our
                 * manual MDIO transaction. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush write */

                udelay(40);
        }

        /* Kick off the write: PHY address, register, data, WRITE command. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to ~500us for START_BUSY to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
                /* Restore auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush write */

                udelay(40);
        }

        return ret;
}
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
/* Re-enable interrupts on every vector.
 *
 * For each vector, the first write acknowledges events up to
 * last_status_idx while keeping interrupts masked; the second write
 * (without MASK_INT) unmasks them.  The final HC_COMMAND write with
 * COAL_NOW forces an immediate coalescing pass — presumably so any
 * events that arrived while masked generate an interrupt (confirm
 * against the host-coalescing block documentation).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        int i;
        struct bnx2_napi *bnapi;

        for (i = 0; i < bp->irq_nvecs; i++) {
                bnapi = &bp->bnx2_napi[i];

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bnapi->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bnapi->last_status_idx);
        }
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
445
446 static void
447 bnx2_disable_int_sync(struct bnx2 *bp)
448 {
449         int i;
450
451         atomic_inc(&bp->intr_sem);
452         bnx2_disable_int(bp);
453         for (i = 0; i < bp->irq_nvecs; i++)
454                 synchronize_irq(bp->irq_tbl[i].vector);
455 }
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
/* Quiesce the interface: synchronously mask interrupts, stop NAPI
 * polling and freeze the tx queue.  trans_start is refreshed so the
 * tx watchdog does not fire while the queue is deliberately stopped.
 * Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
485
/* Undo bnx2_netif_stop().  intr_sem counts nested stop requests; only
 * when the last one is released (dec_and_test) are the tx queue, NAPI
 * and interrupts re-enabled.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
                }
        }
}
497
498 static void
499 bnx2_free_mem(struct bnx2 *bp)
500 {
501         int i;
502
503         for (i = 0; i < bp->ctx_pages; i++) {
504                 if (bp->ctx_blk[i]) {
505                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
506                                             bp->ctx_blk[i],
507                                             bp->ctx_blk_mapping[i]);
508                         bp->ctx_blk[i] = NULL;
509                 }
510         }
511         if (bp->status_blk) {
512                 pci_free_consistent(bp->pdev, bp->status_stats_size,
513                                     bp->status_blk, bp->status_blk_mapping);
514                 bp->status_blk = NULL;
515                 bp->stats_blk = NULL;
516         }
517         if (bp->tx_desc_ring) {
518                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
519                                     bp->tx_desc_ring, bp->tx_desc_mapping);
520                 bp->tx_desc_ring = NULL;
521         }
522         kfree(bp->tx_buf_ring);
523         bp->tx_buf_ring = NULL;
524         for (i = 0; i < bp->rx_max_ring; i++) {
525                 if (bp->rx_desc_ring[i])
526                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
527                                             bp->rx_desc_ring[i],
528                                             bp->rx_desc_mapping[i]);
529                 bp->rx_desc_ring[i] = NULL;
530         }
531         vfree(bp->rx_buf_ring);
532         bp->rx_buf_ring = NULL;
533         for (i = 0; i < bp->rx_max_pg_ring; i++) {
534                 if (bp->rx_pg_desc_ring[i])
535                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
536                                             bp->rx_pg_desc_ring[i],
537                                             bp->rx_pg_desc_mapping[i]);
538                 bp->rx_pg_desc_ring[i] = NULL;
539         }
540         if (bp->rx_pg_ring)
541                 vfree(bp->rx_pg_ring);
542         bp->rx_pg_ring = NULL;
543 }
544
/* Allocate all tx/rx rings, the combined status+statistics block and
 * (on 5709) the context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem() (goto-cleanup pattern).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Software-side tx buffer bookkeeping (regular kernel memory). */
        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        /* DMA-coherent tx descriptor ring shared with the hardware. */
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Software-side rx bookkeeping; vmalloc because the total can
         * be large (per-ring size times rx_max_ring). */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        /* One DMA-coherent rx descriptor ring per page of the ring. */
        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Optional rx page ring (used for jumbo/split-buffer receive;
         * only allocated when a page-ring size was configured). */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        if (bp->flags & BNX2_FLAG_MSIX_CAP)
                /* With MSI-X, reserve one aligned status block slot per
                 * hardware vector. */
                status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                                 BNX2_SBLK_MSIX_ALIGN_SIZE);
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* Vector 0 uses the base status block; the other MSI-X vectors
         * get fixed-size slices within the same allocation. */
        bp->bnx2_napi[0].status_blk = bp->status_blk;
        if (bp->flags & BNX2_FLAG_MSIX_CAP) {
                for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
                        struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

                        bnapi->status_blk_msix = (void *)
                                ((unsigned long) bp->status_blk +
                                 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
                        bnapi->int_num = i << 24;
                }
        }

        /* The statistics block follows the status block(s). */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 needs 8KB of host context memory, split into
                 * BCM_PAGE_SIZE chunks. */
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
643
/* Report the current link state (speed/duplex/autoneg status) to the
 * bootcode via the shared-memory BNX2_LINK_STATUS word.  Skipped
 * entirely when the PHY is managed remotely by the firmware. */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR is read twice — presumably because some
                         * status bits are latched and the first read
                         * returns stale values; confirm against the PHY
                         * datasheet. */
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
702
703 static char *
704 bnx2_xceiver_str(struct bnx2 *bp)
705 {
706         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
707                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
708                  "Copper"));
709 }
710
/* Log the link state transition and update the carrier flag, then
 * propagate the new state to the bootcode via bnx2_report_fw_link().
 * The message is assembled from several printk() calls, so the exact
 * strings below must not change independently of each other. */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
                       bnx2_xceiver_str(bp));

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
                       bnx2_xceiver_str(bp));
        }

        bnx2_report_fw_link(bp);
}
747
/* Resolve the effective tx/rx pause (flow control) configuration into
 * bp->flow_ctrl from the local and link-partner advertisements.
 *
 * If flow control is not being autonegotiated, the requested setting
 * is applied directly (full duplex only).  Otherwise the standard
 * IEEE 802.3 pause resolution (Table 28B-3) is applied; for SerDes
 * links the 1000Base-X advertisement bits are first translated to
 * their copper (PAUSE_CAP/PAUSE_ASYM) equivalents.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Flow control not autonegotiated: honor the request,
                 * but only on full duplex. */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        /* Pause is only defined for full duplex. */
        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                /* 5708 SerDes reports resolved pause directly in a
                 * status register instead of via advertisements. */
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, bp->mii_adv, &local_adv);
        bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                /* Map 1000Base-X pause bits onto the copper equivalents
                 * so the resolution below can be shared. */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
823
824 static int
825 bnx2_5709s_linkup(struct bnx2 *bp)
826 {
827         u32 val, speed;
828
829         bp->link_up = 1;
830
831         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
832         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
833         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
834
835         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
836                 bp->line_speed = bp->req_line_speed;
837                 bp->duplex = bp->req_duplex;
838                 return 0;
839         }
840         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
841         switch (speed) {
842                 case MII_BNX2_GP_TOP_AN_SPEED_10:
843                         bp->line_speed = SPEED_10;
844                         break;
845                 case MII_BNX2_GP_TOP_AN_SPEED_100:
846                         bp->line_speed = SPEED_100;
847                         break;
848                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
849                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
850                         bp->line_speed = SPEED_1000;
851                         break;
852                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
853                         bp->line_speed = SPEED_2500;
854                         break;
855         }
856         if (val & MII_BNX2_GP_TOP_AN_FD)
857                 bp->duplex = DUPLEX_FULL;
858         else
859                 bp->duplex = DUPLEX_HALF;
860         return 0;
861 }
862
863 static int
864 bnx2_5708s_linkup(struct bnx2 *bp)
865 {
866         u32 val;
867
868         bp->link_up = 1;
869         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
870         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
871                 case BCM5708S_1000X_STAT1_SPEED_10:
872                         bp->line_speed = SPEED_10;
873                         break;
874                 case BCM5708S_1000X_STAT1_SPEED_100:
875                         bp->line_speed = SPEED_100;
876                         break;
877                 case BCM5708S_1000X_STAT1_SPEED_1G:
878                         bp->line_speed = SPEED_1000;
879                         break;
880                 case BCM5708S_1000X_STAT1_SPEED_2G5:
881                         bp->line_speed = SPEED_2500;
882                         break;
883         }
884         if (val & BCM5708S_1000X_STAT1_FD)
885                 bp->duplex = DUPLEX_FULL;
886         else
887                 bp->duplex = DUPLEX_HALF;
888
889         return 0;
890 }
891
892 static int
893 bnx2_5706s_linkup(struct bnx2 *bp)
894 {
895         u32 bmcr, local_adv, remote_adv, common;
896
897         bp->link_up = 1;
898         bp->line_speed = SPEED_1000;
899
900         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
901         if (bmcr & BMCR_FULLDPLX) {
902                 bp->duplex = DUPLEX_FULL;
903         }
904         else {
905                 bp->duplex = DUPLEX_HALF;
906         }
907
908         if (!(bmcr & BMCR_ANENABLE)) {
909                 return 0;
910         }
911
912         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
913         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
914
915         common = local_adv & remote_adv;
916         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
917
918                 if (common & ADVERTISE_1000XFULL) {
919                         bp->duplex = DUPLEX_FULL;
920                 }
921                 else {
922                         bp->duplex = DUPLEX_HALF;
923                 }
924         }
925
926         return 0;
927 }
928
929 static int
930 bnx2_copper_linkup(struct bnx2 *bp)
931 {
932         u32 bmcr;
933
934         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
935         if (bmcr & BMCR_ANENABLE) {
936                 u32 local_adv, remote_adv, common;
937
938                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
939                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
940
941                 common = local_adv & (remote_adv >> 2);
942                 if (common & ADVERTISE_1000FULL) {
943                         bp->line_speed = SPEED_1000;
944                         bp->duplex = DUPLEX_FULL;
945                 }
946                 else if (common & ADVERTISE_1000HALF) {
947                         bp->line_speed = SPEED_1000;
948                         bp->duplex = DUPLEX_HALF;
949                 }
950                 else {
951                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
952                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
953
954                         common = local_adv & remote_adv;
955                         if (common & ADVERTISE_100FULL) {
956                                 bp->line_speed = SPEED_100;
957                                 bp->duplex = DUPLEX_FULL;
958                         }
959                         else if (common & ADVERTISE_100HALF) {
960                                 bp->line_speed = SPEED_100;
961                                 bp->duplex = DUPLEX_HALF;
962                         }
963                         else if (common & ADVERTISE_10FULL) {
964                                 bp->line_speed = SPEED_10;
965                                 bp->duplex = DUPLEX_FULL;
966                         }
967                         else if (common & ADVERTISE_10HALF) {
968                                 bp->line_speed = SPEED_10;
969                                 bp->duplex = DUPLEX_HALF;
970                         }
971                         else {
972                                 bp->line_speed = 0;
973                                 bp->link_up = 0;
974                         }
975                 }
976         }
977         else {
978                 if (bmcr & BMCR_SPEED100) {
979                         bp->line_speed = SPEED_100;
980                 }
981                 else {
982                         bp->line_speed = SPEED_10;
983                 }
984                 if (bmcr & BMCR_FULLDPLX) {
985                         bp->duplex = DUPLEX_FULL;
986                 }
987                 else {
988                         bp->duplex = DUPLEX_HALF;
989                 }
990         }
991
992         return 0;
993 }
994
/* Program the EMAC to match the link state resolved in bp (speed, duplex,
 * flow control), then acknowledge the link-change interrupt.
 * Returns 0 unconditionally.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Slot-time/IPG programming; 0x26ff is used for the half-duplex
	 * gigabit case.  NOTE(review): values presumably come from the
	 * Broadcom datasheet — confirm before changing. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	/* Clear all mode bits we may set below before re-deriving them. */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M MII mode and
				 * uses plain MII instead. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1061
1062 static void
1063 bnx2_enable_bmsr1(struct bnx2 *bp)
1064 {
1065         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1066             (CHIP_NUM(bp) == CHIP_NUM_5709))
1067                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1068                                MII_BNX2_BLK_ADDR_GP_STATUS);
1069 }
1070
1071 static void
1072 bnx2_disable_bmsr1(struct bnx2 *bp)
1073 {
1074         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1075             (CHIP_NUM(bp) == CHIP_NUM_5709))
1076                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1077                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1078 }
1079
1080 static int
1081 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1082 {
1083         u32 up1;
1084         int ret = 1;
1085
1086         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1087                 return 0;
1088
1089         if (bp->autoneg & AUTONEG_SPEED)
1090                 bp->advertising |= ADVERTISED_2500baseX_Full;
1091
1092         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1093                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1094
1095         bnx2_read_phy(bp, bp->mii_up1, &up1);
1096         if (!(up1 & BCM5708S_UP1_2G5)) {
1097                 up1 |= BCM5708S_UP1_2G5;
1098                 bnx2_write_phy(bp, bp->mii_up1, up1);
1099                 ret = 0;
1100         }
1101
1102         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1103                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1104                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1105
1106         return ret;
1107 }
1108
1109 static int
1110 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1111 {
1112         u32 up1;
1113         int ret = 0;
1114
1115         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1116                 return 0;
1117
1118         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1119                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1120
1121         bnx2_read_phy(bp, bp->mii_up1, &up1);
1122         if (up1 & BCM5708S_UP1_2G5) {
1123                 up1 &= ~BCM5708S_UP1_2G5;
1124                 bnx2_write_phy(bp, bp->mii_up1, up1);
1125                 ret = 1;
1126         }
1127
1128         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1129                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1130                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1131
1132         return ret;
1133 }
1134
/* Force the SerDes PHY to 2.5G operation (no autoneg), using the
 * chip-specific mechanism: MISC1 force bits on 5709, a BMCR bit on 5708.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Set the force-2.5G bits in the SERDES_DIG MISC1 register,
		 * then restore the combo IEEE block and fetch BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	/* NOTE(review): if the chip is neither 5709 nor 5708, bmcr is used
	 * uninitialized below.  Presumably only those two chips ever set
	 * BNX2_PHY_FLAG_2_5G_CAPABLE — confirm. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1169
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific force-2.5G
 * state and, when autonegotiating, restart autoneg at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in SERDES_DIG MISC1, then restore
		 * the combo IEEE block and fetch BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	/* NOTE(review): as in bnx2_enable_forced_2g5(), bmcr is used
	 * uninitialized if the chip is neither 5709 nor 5708 — presumably
	 * unreachable for 2.5G-capable parts; confirm. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1200
1201 static void
1202 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1203 {
1204         u32 val;
1205
1206         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1207         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1208         if (start)
1209                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1210         else
1211                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1212 }
1213
/* Re-evaluate the physical link state, update bp->link_up/speed/duplex
 * via the chip-specific linkup helpers, report a state change, and
 * reprogram the MAC.  Returns 0 unconditionally.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC/PHY loopback the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware owns link management when the PHY is remote. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: the link-status bit is latching, so the first
	 * read clears a stale latch and the second reflects current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: release any forced-down state, then override
		 * the BMSR link bit with the EMAC's link indication. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: undo forced 2.5G and parallel-detect state so
		 * the next autoneg attempt starts cleanly. */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Log only on transitions; always reprogram the MAC. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1291
1292 static int
1293 bnx2_reset_phy(struct bnx2 *bp)
1294 {
1295         int i;
1296         u32 reg;
1297
1298         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1299
1300 #define PHY_RESET_MAX_WAIT 100
1301         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1302                 udelay(10);
1303
1304                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1305                 if (!(reg & BMCR_RESET)) {
1306                         udelay(20);
1307                         break;
1308                 }
1309         }
1310         if (i == PHY_RESET_MAX_WAIT) {
1311                 return -EBUSY;
1312         }
1313         return 0;
1314 }
1315
1316 static u32
1317 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1318 {
1319         u32 adv = 0;
1320
1321         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1322                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1323
1324                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1325                         adv = ADVERTISE_1000XPAUSE;
1326                 }
1327                 else {
1328                         adv = ADVERTISE_PAUSE_CAP;
1329                 }
1330         }
1331         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1332                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1333                         adv = ADVERTISE_1000XPSE_ASYM;
1334                 }
1335                 else {
1336                         adv = ADVERTISE_PAUSE_ASYM;
1337                 }
1338         }
1339         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1340                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1341                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1342                 }
1343                 else {
1344                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1345                 }
1346         }
1347         return adv;
1348 }
1349
1350 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1351
1352 static int
1353 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1354 {
1355         u32 speed_arg = 0, pause_adv;
1356
1357         pause_adv = bnx2_phy_get_pause_adv(bp);
1358
1359         if (bp->autoneg & AUTONEG_SPEED) {
1360                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1361                 if (bp->advertising & ADVERTISED_10baseT_Half)
1362                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1363                 if (bp->advertising & ADVERTISED_10baseT_Full)
1364                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1365                 if (bp->advertising & ADVERTISED_100baseT_Half)
1366                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1367                 if (bp->advertising & ADVERTISED_100baseT_Full)
1368                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1369                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1370                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1371                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1372                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1373         } else {
1374                 if (bp->req_line_speed == SPEED_2500)
1375                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1376                 else if (bp->req_line_speed == SPEED_1000)
1377                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1378                 else if (bp->req_line_speed == SPEED_100) {
1379                         if (bp->req_duplex == DUPLEX_FULL)
1380                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1381                         else
1382                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1383                 } else if (bp->req_line_speed == SPEED_10) {
1384                         if (bp->req_duplex == DUPLEX_FULL)
1385                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1386                         else
1387                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1388                 }
1389         }
1390
1391         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1392                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1393         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1394                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1395
1396         if (port == PORT_TP)
1397                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1398                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1399
1400         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1401
1402         spin_unlock_bh(&bp->phy_lock);
1403         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1404         spin_lock_bh(&bp->phy_lock);
1405
1406         return 0;
1407 }
1408
/* Configure the SerDes PHY according to bp's requested autoneg/speed/
 * duplex settings.  Delegates to the firmware path for remote PHYs.
 * Returns 0 unconditionally.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Forced speed: align the 2.5G capability bit with the
		 * request; if it had to change, the link must be bounced. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear the BMCR_SPEED100 bit (0x2000) so
				 * the BMCR speed select resolves to 1G. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve flow control
			 * and reprogram the MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	/* Build the new 1000X advertisement (duplex + pause bits). */
	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() sleeps; drop phy_lock around it. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1523
/* Advertisement mask for fibre boards; evaluates bp->phy_flags at the
 * use site.  Fix: the ternary is now wrapped in outer parentheses —
 * previously `ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg` bound the
 * `|` into the false branch only, so 2.5G-capable boards silently lost
 * any OR'ed-in flags (CERT PRE01-C). */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                 \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full))

/* Advertisement mask for copper boards (all 10/100/1000 modes). */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* Raw MII advertisement bits for all 10/100 modes plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* Raw MII_CTRL1000 advertisement bits for both 1000 modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1538
/* Initialize autoneg/advertising/req_* defaults from the link word the
 * firmware stored in shared memory for this port type (remote PHY only).
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: map each firmware speed bit to the
		 * corresponding ethtool advertisement bit. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: highest speed bit wins (checked in
		 * ascending order so later assignments override). */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1585
1586 static void
1587 bnx2_set_default_link(struct bnx2 *bp)
1588 {
1589         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1590                 return bnx2_set_default_remote_link(bp);
1591
1592         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1593         bp->req_line_speed = 0;
1594         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1595                 u32 reg;
1596
1597                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1598
1599                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1600                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1601                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1602                         bp->autoneg = 0;
1603                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1604                         bp->req_duplex = DUPLEX_FULL;
1605                 }
1606         } else
1607                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1608 }
1609
/* Write the next driver-pulse sequence number to the shared-memory
 * pulse mailbox so firmware knows the driver is alive.  The write goes
 * through the PCICFG register window, which is shared state protected
 * by indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	/* Window address must be set before the data write. */
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1623
/* Handle a link event reported by the remote (firmware-managed) PHY.
 * Reads the shared-memory link status word, refreshes the heart beat
 * if the firmware says ours expired, then mirrors the reported link
 * state, speed, duplex, flow control and port type into *bp.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;       /* remember old state for reporting */
        u8 old_port;

        msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each xxxHALF case deliberately falls through to the
                 * matching full-duplex case below it to set line_speed.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                spin_lock(&bp->phy_lock);
                bp->flow_ctrl = 0;
                /* Requested flow control is honored directly unless both
                 * speed and flow-control autoneg are enabled together.
                 */
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                /* Re-derive default link settings if the port type changed. */
                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

                spin_unlock(&bp->phy_lock);
        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1702
1703 static int
1704 bnx2_set_remote_link(struct bnx2 *bp)
1705 {
1706         u32 evt_code;
1707
1708         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1709         switch (evt_code) {
1710                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1711                         bnx2_remote_phy_event(bp);
1712                         break;
1713                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1714                 default:
1715                         bnx2_send_heart_beat(bp);
1716                         break;
1717         }
1718         return 0;
1719 }
1720
/* Configure the copper PHY from the requested settings in *bp.
 * With AUTONEG_SPEED set, (re)programs the advertisement registers
 * and restarts autoneg only if something actually changed; otherwise
 * forces the requested speed/duplex via BMCR.
 *
 * Called with bp->phy_lock held (it is dropped briefly around the
 * forced-link-down delay).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Translate ethtool advertising flags into MII bits. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Only rewrite and restart autoneg if the advertisement
                 * changed or autoneg is not currently enabled.
                 */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link-down is latched; read twice so bmsr reflects
                 * the current link state.
                 */
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        /* Drop phy_lock around the sleep; caller holds it. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
                }

                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }
        return 0;
}
1817
1818 static int
1819 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1820 {
1821         if (bp->loopback == MAC_LOOPBACK)
1822                 return 0;
1823
1824         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1825                 return (bnx2_setup_serdes_phy(bp, port));
1826         }
1827         else {
1828                 return (bnx2_setup_copper_phy(bp));
1829         }
1830 }
1831
/* Initialize the 5709 SerDes PHY.  Its registers are banked: each
 * step selects a block via MII_BNX2_BLK_ADDR and then programs
 * registers within that block, so the write order matters.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
        u32 val;

        /* The standard IEEE registers sit at a 0x10 offset on this PHY. */
        bp->mii_bmcr = MII_BMCR + 0x10;
        bp->mii_bmsr = MII_BMSR + 0x10;
        bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
        bp->mii_adv = MII_ADVERTISE + 0x10;
        bp->mii_lpa = MII_LPA + 0x10;
        bp->mii_up1 = MII_BNX2_OVER1G_UP1;

        /* Select the AN MMD through the AER block before anything else. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
        bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_reset_phy(bp);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

        /* Force fiber mode rather than media auto-detect. */
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
        val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
        val |= MII_BNX2_SD_1000XCTL1_FIBER;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

        /* Advertise 2.5G only when the device is capable of it. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
        bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                val |= BCM5708S_UP1_2G5;
        else
                val &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
        bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
        val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
        bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

        val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
              MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
        bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

        /* Leave the block address pointing at the IEEE combo block. */
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

        return 0;
}
1880
/* Initialize the 5708 SerDes PHY: select fiber mode with auto-detect,
 * enable PLL element detect, optionally enable 2.5G, and apply
 * revision- and board-specific TX amplitude adjustments.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        bp->mii_up1 = BCM5708S_UP1;

        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        /* Enable 2.5G advertisement on capable devices. */
        if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* If NVRAM supplies a TXCTL3 value, apply it on backplane boards. */
        val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
1937
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c register writes
 * are a vendor-specified shadow-register sequence that adjusts packet
 * length handling depending on whether jumbo MTU is configured.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
        bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
1974
/* Initialize the copper PHY: apply the CRC workaround and early-DAC
 * disable when flagged, configure packet length handling for the
 * current MTU, and enable ethernet@wirespeed.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
        u32 val;

        bnx2_reset_phy(bp);

        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                /* Vendor-specified shadow-register write sequence for the
                 * CRC workaround; the values are opaque magic.
                 */
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                /* Clear bit 8 of DSP expansion register 8. */
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Clear extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2025
2026
2027 static int
2028 bnx2_init_phy(struct bnx2 *bp)
2029 {
2030         u32 val;
2031         int rc = 0;
2032
2033         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2034         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2035
2036         bp->mii_bmcr = MII_BMCR;
2037         bp->mii_bmsr = MII_BMSR;
2038         bp->mii_bmsr1 = MII_BMSR;
2039         bp->mii_adv = MII_ADVERTISE;
2040         bp->mii_lpa = MII_LPA;
2041
2042         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2043
2044         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2045                 goto setup_phy;
2046
2047         bnx2_read_phy(bp, MII_PHYSID1, &val);
2048         bp->phy_id = val << 16;
2049         bnx2_read_phy(bp, MII_PHYSID2, &val);
2050         bp->phy_id |= val & 0xffff;
2051
2052         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2053                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2054                         rc = bnx2_init_5706s_phy(bp);
2055                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2056                         rc = bnx2_init_5708s_phy(bp);
2057                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2058                         rc = bnx2_init_5709s_phy(bp);
2059         }
2060         else {
2061                 rc = bnx2_init_copper_phy(bp);
2062         }
2063
2064 setup_phy:
2065         if (!rc)
2066                 rc = bnx2_setup_phy(bp, bp->phy_port);
2067
2068         return rc;
2069 }
2070
2071 static int
2072 bnx2_set_mac_loopback(struct bnx2 *bp)
2073 {
2074         u32 mac_mode;
2075
2076         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2077         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2078         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2079         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2080         bp->link_up = 1;
2081         return 0;
2082 }
2083
2084 static int bnx2_test_link(struct bnx2 *);
2085
2086 static int
2087 bnx2_set_phy_loopback(struct bnx2 *bp)
2088 {
2089         u32 mac_mode;
2090         int rc, i;
2091
2092         spin_lock_bh(&bp->phy_lock);
2093         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2094                             BMCR_SPEED1000);
2095         spin_unlock_bh(&bp->phy_lock);
2096         if (rc)
2097                 return rc;
2098
2099         for (i = 0; i < 10; i++) {
2100                 if (bnx2_test_link(bp) == 0)
2101                         break;
2102                 msleep(100);
2103         }
2104
2105         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2106         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2107                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2108                       BNX2_EMAC_MODE_25G_MODE);
2109
2110         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2111         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2112         bp->link_up = 1;
2113         return 0;
2114 }
2115
/* Send @msg_data (tagged with the next driver sequence number) to the
 * firmware via the driver mailbox and wait up to FW_ACK_TIME_OUT_MS
 * for the firmware to acknowledge the sequence number.
 *
 * Returns 0 on ack (WAIT0-class messages return 0 without waiting for
 * the full handshake), -EBUSY on ack timeout (after telling the
 * firmware about the timeout) and -EIO if the firmware reports a
 * non-OK status.  @silent suppresses the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require a completed handshake. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2158
/* Program the 5709 context-memory host page table.  Enables the
 * context block, waits for its internal memory init to complete, then
 * writes each host page DMA address into the page table, polling for
 * each write request to be consumed by the hardware.
 *
 * Returns 0 on success or -EBUSY if the hardware fails to respond.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* MEM_INIT self-clears when initialization completes. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Low 32 bits of the page address, plus the valid bit. */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* WRITE_REQ clears once the entry has been accepted. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2201
2202 static void
2203 bnx2_init_context(struct bnx2 *bp)
2204 {
2205         u32 vcid;
2206
2207         vcid = 96;
2208         while (vcid) {
2209                 u32 vcid_addr, pcid_addr, offset;
2210                 int i;
2211
2212                 vcid--;
2213
2214                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2215                         u32 new_vcid;
2216
2217                         vcid_addr = GET_PCID_ADDR(vcid);
2218                         if (vcid & 0x8) {
2219                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2220                         }
2221                         else {
2222                                 new_vcid = vcid;
2223                         }
2224                         pcid_addr = GET_PCID_ADDR(new_vcid);
2225                 }
2226                 else {
2227                         vcid_addr = GET_CID_ADDR(vcid);
2228                         pcid_addr = vcid_addr;
2229                 }
2230
2231                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2232                         vcid_addr += (i << PHY_CTX_SHIFT);
2233                         pcid_addr += (i << PHY_CTX_SHIFT);
2234
2235                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2236                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2237
2238                         /* Zero out the context. */
2239                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2240                                 CTX_WR(bp, vcid_addr, offset, 0);
2241                 }
2242         }
2243 }
2244
2245 static int
2246 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2247 {
2248         u16 *good_mbuf;
2249         u32 good_mbuf_cnt;
2250         u32 val;
2251
2252         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2253         if (good_mbuf == NULL) {
2254                 printk(KERN_ERR PFX "Failed to allocate memory in "
2255                                     "bnx2_alloc_bad_rbuf\n");
2256                 return -ENOMEM;
2257         }
2258
2259         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2260                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2261
2262         good_mbuf_cnt = 0;
2263
2264         /* Allocate a bunch of mbufs and save the good ones in an array. */
2265         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2266         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2267                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2268                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2269
2270                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2271
2272                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2273
2274                 /* The addresses with Bit 9 set are bad memory blocks. */
2275                 if (!(val & (1 << 9))) {
2276                         good_mbuf[good_mbuf_cnt] = (u16) val;
2277                         good_mbuf_cnt++;
2278                 }
2279
2280                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2281         }
2282
2283         /* Free the good ones back to the mbuf pool thus discarding
2284          * all the bad ones. */
2285         while (good_mbuf_cnt) {
2286                 good_mbuf_cnt--;
2287
2288                 val = good_mbuf[good_mbuf_cnt];
2289                 val = (val << 9) | val | 1;
2290
2291                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2292         }
2293         kfree(good_mbuf);
2294         return 0;
2295 }
2296
2297 static void
2298 bnx2_set_mac_addr(struct bnx2 *bp)
2299 {
2300         u32 val;
2301         u8 *mac_addr = bp->dev->dev_addr;
2302
2303         val = (mac_addr[0] << 8) | mac_addr[1];
2304
2305         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2306
2307         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2308                 (mac_addr[4] << 8) | mac_addr[5];
2309
2310         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2311 }
2312
2313 static inline int
2314 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2315 {
2316         dma_addr_t mapping;
2317         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2318         struct rx_bd *rxbd =
2319                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2320         struct page *page = alloc_page(GFP_ATOMIC);
2321
2322         if (!page)
2323                 return -ENOMEM;
2324         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2325                                PCI_DMA_FROMDEVICE);
2326         rx_pg->page = page;
2327         pci_unmap_addr_set(rx_pg, mapping, mapping);
2328         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2329         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2330         return 0;
2331 }
2332
2333 static void
2334 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2335 {
2336         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2337         struct page *page = rx_pg->page;
2338
2339         if (!page)
2340                 return;
2341
2342         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2343                        PCI_DMA_FROMDEVICE);
2344
2345         __free_page(page);
2346         rx_pg->page = NULL;
2347 }
2348
2349 static inline int
2350 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2351 {
2352         struct sk_buff *skb;
2353         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2354         dma_addr_t mapping;
2355         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2356         unsigned long align;
2357
2358         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2359         if (skb == NULL) {
2360                 return -ENOMEM;
2361         }
2362
2363         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2364                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2365
2366         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2367                 PCI_DMA_FROMDEVICE);
2368
2369         rx_buf->skb = skb;
2370         pci_unmap_addr_set(rx_buf, mapping, mapping);
2371
2372         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2373         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2374
2375         bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2376
2377         return 0;
2378 }
2379
2380 static int
2381 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2382 {
2383         struct status_block *sblk = bnapi->status_blk;
2384         u32 new_link_state, old_link_state;
2385         int is_set = 1;
2386
2387         new_link_state = sblk->status_attn_bits & event;
2388         old_link_state = sblk->status_attn_bits_ack & event;
2389         if (new_link_state != old_link_state) {
2390                 if (new_link_state)
2391                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2392                 else
2393                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2394         } else
2395                 is_set = 0;
2396
2397         return is_set;
2398 }
2399
2400 static void
2401 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2402 {
2403         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
2404                 spin_lock(&bp->phy_lock);
2405                 bnx2_set_link(bp);
2406                 spin_unlock(&bp->phy_lock);
2407         }
2408         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2409                 bnx2_set_remote_link(bp);
2410
2411 }
2412
2413 static inline u16
2414 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2415 {
2416         u16 cons;
2417
2418         if (bnapi->int_num == 0)
2419                 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2420         else
2421                 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2422
2423         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2424                 cons++;
2425         return cons;
2426 }
2427
/* Reclaim completed tx buffers up to the hardware consumer index:
 * unmap and free each completed skb, then wake the tx queue if it was
 * stopped and enough descriptors are now available.  Returns the
 * number of packets reclaimed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0;

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = bnapi->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Don't reclaim until the packet's last BD has
                         * completed (signed compare handles wraparound).
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                /* Unmap each fragment page in the following BDs. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Re-sample; more packets may have completed meanwhile. */
                hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        bnapi->hw_tx_cons = hw_cons;
        bnapi->tx_cons = sw_cons;
        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
                     (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                /* Re-check under the tx lock to avoid racing with a
                 * concurrent queue stop.
                 */
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
        return tx_pkt;
}
2509
/* Return @count page-ring buffers to the hardware producer without
 * allocating new pages (used on error and allocation-failure paths).
 * If @skb is non-NULL, its last page fragment is detached, remapped for
 * DMA, and placed back into the consumer slot before the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Detach the skb's last page fragment and put it
			 * back into the consumer slot for reuse.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page and its DMA mapping from the
			 * consumer entry to the producer entry.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2559
/* Recycle an RX buffer from consumer slot @cons to producer slot @prod
 * without reallocating, e.g. when the packet had errors or a replacement
 * skb could not be allocated.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the buffer back to the device; only the header region
	 * that was synced for the CPU needs syncing back.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	/* Different slots: carry the DMA mapping and BD address over to
	 * the producer entry.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2589
/* Finish reception of a packet into @skb.  For non-split frames the data
 * is entirely in the linear buffer; for split/jumbo frames the first
 * @hdr_len bytes are linear and the remainder is attached as page
 * fragments taken from the page ring.  @ring_idx packs the consumer
 * index in the high 16 bits and the producer index in the low 16 bits.
 * Returns 0 on success, or an error after recycling the buffers.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the ring slot with a fresh skb first; if that fails,
	 * recycle the current buffer (and any pages) and drop the packet.
	 */
	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bnapi->rx_pg_cons;
		u16 pg_prod = bnapi->rx_pg_prod;

		/* The caller stripped 4 trailing bytes (presumably the
		 * FCS) from @len; frag_size includes them again and they
		 * are trimmed from the last fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The remaining bytes are only (part of)
				 * the 4-byte tail: trim it from the skb
				 * and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Drop the 4-byte tail from the final fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Replenish the page-ring slot just consumed. */
			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bnapi->rx_pg_prod = pg_prod;
		bnapi->rx_pg_cons = pg_cons;
	}
	return 0;
}
2681
2682 static inline u16
2683 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2684 {
2685         u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2686
2687         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2688                 cons++;
2689         return cons;
2690 }
2691
/* Service the RX ring: process up to @budget received packets, handing
 * good frames to the network stack and recycling buffers for bad ones.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bnapi->rx_cons;
	sw_prod = bnapi->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; that is all we
		 * need to inspect before deciding how to handle the frame.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to the data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Bad frame: recycle the buffer and drop it. */
			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* For split frames the header length is reported
			 * in the l2_fhdr_ip_xsum field.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small packet: copy into a fresh skb so the DMA
			 * buffer can be recycled immediately.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, bnapi, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop frames longer than the MTU unless they carry a
		 * VLAN ethertype (0x8100), which explains the extra bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum status when RX csum offload is
		 * enabled and the frame is TCP or UDP.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bnapi->rx_cons = sw_cons;
	bnapi->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2837
2838 /* MSI ISR - The only difference between this and the INTx ISR
2839  * is that the MSI interrupt is always serviced.
2840  */
2841 static irqreturn_t
2842 bnx2_msi(int irq, void *dev_instance)
2843 {
2844         struct net_device *dev = dev_instance;
2845         struct bnx2 *bp = netdev_priv(dev);
2846         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2847
2848         prefetch(bnapi->status_blk);
2849         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2850                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2851                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2852
2853         /* Return here if interrupt is disabled. */
2854         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2855                 return IRQ_HANDLED;
2856
2857         netif_rx_schedule(dev, &bnapi->napi);
2858
2859         return IRQ_HANDLED;
2860 }
2861
2862 static irqreturn_t
2863 bnx2_msi_1shot(int irq, void *dev_instance)
2864 {
2865         struct net_device *dev = dev_instance;
2866         struct bnx2 *bp = netdev_priv(dev);
2867         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2868
2869         prefetch(bnapi->status_blk);
2870
2871         /* Return here if interrupt is disabled. */
2872         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2873                 return IRQ_HANDLED;
2874
2875         netif_rx_schedule(dev, &bnapi->napi);
2876
2877         return IRQ_HANDLED;
2878 }
2879
/* INTx interrupt handler.  The line may be shared, so the handler must
 * determine whether the interrupt is really ours before acking it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack the interrupt and mask further ones until NAPI is done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		/* Record the status index we are servicing before
		 * scheduling, so new events re-trigger the interrupt.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2919
2920 static irqreturn_t
2921 bnx2_tx_msix(int irq, void *dev_instance)
2922 {
2923         struct net_device *dev = dev_instance;
2924         struct bnx2 *bp = netdev_priv(dev);
2925         struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2926
2927         prefetch(bnapi->status_blk_msix);
2928
2929         /* Return here if interrupt is disabled. */
2930         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2931                 return IRQ_HANDLED;
2932
2933         netif_rx_schedule(dev, &bnapi->napi);
2934         return IRQ_HANDLED;
2935 }
2936
/* Status-block attention bits the driver acts on; see bnx2_has_work()
 * and bnx2_poll_work().
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
2939
2940 static inline int
2941 bnx2_has_work(struct bnx2_napi *bnapi)
2942 {
2943         struct status_block *sblk = bnapi->status_blk;
2944
2945         if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2946             (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
2947                 return 1;
2948
2949         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2950             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2951                 return 1;
2952
2953         return 0;
2954 }
2955
/* NAPI poll handler for the dedicated TX MSI-X vector: reclaim completed
 * TX descriptors until the ring is drained or @budget is exhausted, then
 * complete NAPI and re-arm the vector's interrupt.
 */
static int bnx2_tx_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk_msix;

	do {
		work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
		if (unlikely(work_done >= budget))
			return work_done;

		/* Record the status index before re-checking for work so
		 * a completion arriving in between re-triggers the IRQ.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
	} while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);

	netif_rx_complete(bp->dev, napi);
	/* Re-enable the interrupt for this vector, acknowledging the
	 * status index we have processed up to.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       bnapi->last_status_idx);
	return work_done;
}
2978
/* One pass of NAPI work: handle PHY attention events, reclaim TX
 * completions, and process RX packets.  Returns the updated work_done
 * count; only RX packets are charged against the budget.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* An attention event is pending when a bit and its ack differ. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	/* Budget 0 means unlimited here: bnx2_tx_int() only stops when
	 * tx_pkt equals budget, which never happens for 0.
	 */
	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3007
/* Main NAPI poll handler: loop until the budget is spent or no work
 * remains, then complete NAPI and re-enable interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* For INTx the index is written twice: first with
			 * the interrupt still masked, then a final write
			 * without MASK_INT that re-enables the interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3049
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC RX mode (promiscuous / VLAN-keep) and the RPM sort
 * and multicast hash registers from the netdev flags and MC list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and
	 * VLAN-keep bits cleared; they are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each MC address (low byte of CRC) into one bit of
		 * a filter spread over the 32-bit hash registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort mode: clear, set, then set with enable bit. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3124
3125 static void
3126 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3127         u32 rv2p_proc)
3128 {
3129         int i;
3130         u32 val;
3131
3132
3133         for (i = 0; i < rv2p_code_len; i += 8) {
3134                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3135                 rv2p_code++;
3136                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3137                 rv2p_code++;
3138
3139                 if (rv2p_proc == RV2P_PROC1) {
3140                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3141                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3142                 }
3143                 else {
3144                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3145                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3146                 }
3147         }
3148
3149         /* Reset the processor, un-stall is done later. */
3150         if (rv2p_proc == RV2P_PROC1) {
3151                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3152         }
3153         else {
3154                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3155         }
3156 }
3157
/* Halt a chip CPU, download its firmware image (text/data/sbss/bss/
 * rodata) into the CPU's scratchpad via indirect register writes, then
 * restart it at the firmware entry point.  Returns 0 on success or a
 * negative error from text decompression.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* The text section is stored compressed; inflate it into
		 * fw->text before writing it out word by word.
		 */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
	bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

	return 0;
}
3239
3240 static int
3241 bnx2_init_cpus(struct bnx2 *bp)
3242 {
3243         struct cpu_reg cpu_reg;
3244         struct fw_info *fw;
3245         int rc, rv2p_len;
3246         void *text, *rv2p;
3247
3248         /* Initialize the RV2P processor. */
3249         text = vmalloc(FW_BUF_SIZE);
3250         if (!text)
3251                 return -ENOMEM;
3252         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3253                 rv2p = bnx2_xi_rv2p_proc1;
3254                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3255         } else {
3256                 rv2p = bnx2_rv2p_proc1;
3257                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3258         }
3259         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3260         if (rc < 0)
3261                 goto init_cpu_err;
3262
3263         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3264
3265         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3266                 rv2p = bnx2_xi_rv2p_proc2;
3267                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3268         } else {
3269                 rv2p = bnx2_rv2p_proc2;
3270                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3271         }
3272         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3273         if (rc < 0)
3274                 goto init_cpu_err;
3275
3276         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3277
3278         /* Initialize the RX Processor. */
3279         cpu_reg.mode = BNX2_RXP_CPU_MODE;
3280         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3281         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3282         cpu_reg.state = BNX2_RXP_CPU_STATE;
3283         cpu_reg.state_value_clear = 0xffffff;
3284         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3285         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3286         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3287         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3288         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3289         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3290         cpu_reg.mips_view_base = 0x8000000;
3291
3292         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3293                 fw = &bnx2_rxp_fw_09;
3294         else
3295                 fw = &bnx2_rxp_fw_06;
3296
3297         fw->text = text;
3298         rc = load_cpu_fw(bp, &cpu_reg, fw);
3299         if (rc)
3300                 goto init_cpu_err;
3301
3302         /* Initialize the TX Processor. */
3303         cpu_reg.mode = BNX2_TXP_CPU_MODE;
3304         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3305         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3306         cpu_reg.state = BNX2_TXP_CPU_STATE;
3307         cpu_reg.state_value_clear = 0xffffff;
3308         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3309         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3310         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3311         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3312         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3313         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3314         cpu_reg.mips_view_base = 0x8000000;
3315
3316         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3317                 fw = &bnx2_txp_fw_09;
3318         else
3319                 fw = &bnx2_txp_fw_06;
3320
3321         fw->text = text;
3322         rc = load_cpu_fw(bp, &cpu_reg, fw);
3323         if (rc)
3324                 goto init_cpu_err;
3325
3326         /* Initialize the TX Patch-up Processor. */
3327         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3328         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3329         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3330         cpu_reg.state = BNX2_TPAT_CPU_STATE;
3331         cpu_reg.state_value_clear = 0xffffff;
3332         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3333         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3334         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3335         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3336         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3337         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3338         cpu_reg.mips_view_base = 0x8000000;
3339
3340         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3341                 fw = &bnx2_tpat_fw_09;
3342         else
3343                 fw = &bnx2_tpat_fw_06;
3344
3345         fw->text = text;
3346         rc = load_cpu_fw(bp, &cpu_reg, fw);
3347         if (rc)
3348                 goto init_cpu_err;
3349
3350         /* Initialize the Completion Processor. */
3351         cpu_reg.mode = BNX2_COM_CPU_MODE;
3352         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3353         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3354         cpu_reg.state = BNX2_COM_CPU_STATE;
3355         cpu_reg.state_value_clear = 0xffffff;
3356         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3357         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3358         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3359         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3360         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3361         cpu_reg.spad_base = BNX2_COM_SCRATCH;
3362         cpu_reg.mips_view_base = 0x8000000;
3363
3364         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3365                 fw = &bnx2_com_fw_09;
3366         else
3367                 fw = &bnx2_com_fw_06;
3368
3369         fw->text = text;
3370         rc = load_cpu_fw(bp, &cpu_reg, fw);
3371         if (rc)
3372                 goto init_cpu_err;
3373
3374         /* Initialize the Command Processor. */
3375         cpu_reg.mode = BNX2_CP_CPU_MODE;
3376         cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3377         cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3378         cpu_reg.state = BNX2_CP_CPU_STATE;
3379         cpu_reg.state_value_clear = 0xffffff;
3380         cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3381         cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3382         cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3383         cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3384         cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3385         cpu_reg.spad_base = BNX2_CP_SCRATCH;
3386         cpu_reg.mips_view_base = 0x8000000;
3387
3388         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3389                 fw = &bnx2_cp_fw_09;
3390         else
3391                 fw = &bnx2_cp_fw_06;
3392
3393         fw->text = text;
3394         rc = load_cpu_fw(bp, &cpu_reg, fw);
3395
3396 init_cpu_err:
3397         vfree(text);
3398         return rc;
3399 }
3400
/* Transition the chip between PCI power states.
 *
 * PCI_D0: clear the PMCSR state bits and any latched PME status, wait
 * out the D3hot exit delay if needed, then disable the magic-packet /
 * ACPI wake modes that were armed on suspend.
 *
 * PCI_D3hot: if Wake-on-LAN is enabled, reprogram the PHY and MAC so
 * WOL frames can be received while suspended, tell the firmware which
 * suspend mode is in effect, then write the new power state to PMCSR.
 * No register access is allowed after the PMCSR write until the
 * device is brought back to D0.
 *
 * Returns 0 on success, -EINVAL for unsupported states (D1/D2/D3cold).
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state field (enter D0) and write 1 to clear
		 * any latched PME status bit. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Acknowledge any wake packets received while suspended
		 * and turn off magic-packet match mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so
			 * the link can come up at a WOL-capable speed;
			 * the user settings are restored below. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-user rule 0 to accept broadcast
			 * and multicast, clearing it first. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware which suspend mode we entered. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* NOTE(review): 5706 A0/A1 only enter D3hot (state = 3)
		 * when WOL is armed — presumably a chip erratum with
		 * D3hot on those steppings; confirm against errata. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3537
3538 static int
3539 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3540 {
3541         u32 val;
3542         int j;
3543
3544         /* Request access to the flash interface. */
3545         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3546         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3547                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3548                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3549                         break;
3550
3551                 udelay(5);
3552         }
3553
3554         if (j >= NVRAM_TIMEOUT_COUNT)
3555                 return -EBUSY;
3556
3557         return 0;
3558 }
3559
3560 static int
3561 bnx2_release_nvram_lock(struct bnx2 *bp)
3562 {
3563         int j;
3564         u32 val;
3565
3566         /* Relinquish nvram interface. */
3567         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3568
3569         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3570                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3571                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3572                         break;
3573
3574                 udelay(5);
3575         }
3576
3577         if (j >= NVRAM_TIMEOUT_COUNT)
3578                 return -EBUSY;
3579
3580         return 0;
3581 }
3582
3583
3584 static int
3585 bnx2_enable_nvram_write(struct bnx2 *bp)
3586 {
3587         u32 val;
3588
3589         val = REG_RD(bp, BNX2_MISC_CFG);
3590         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3591
3592         if (bp->flash_info->flags & BNX2_NV_WREN) {
3593                 int j;
3594
3595                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3596                 REG_WR(bp, BNX2_NVM_COMMAND,
3597                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3598
3599                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3600                         udelay(5);
3601
3602                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3603                         if (val & BNX2_NVM_COMMAND_DONE)
3604                                 break;
3605                 }
3606
3607                 if (j >= NVRAM_TIMEOUT_COUNT)
3608                         return -EBUSY;
3609         }
3610         return 0;
3611 }
3612
3613 static void
3614 bnx2_disable_nvram_write(struct bnx2 *bp)
3615 {
3616         u32 val;
3617
3618         val = REG_RD(bp, BNX2_MISC_CFG);
3619         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3620 }
3621
3622
3623 static void
3624 bnx2_enable_nvram_access(struct bnx2 *bp)
3625 {
3626         u32 val;
3627
3628         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3629         /* Enable both bits, even on read. */
3630         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3631                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3632 }
3633
3634 static void
3635 bnx2_disable_nvram_access(struct bnx2 *bp)
3636 {
3637         u32 val;
3638
3639         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3640         /* Disable both bits, even after read. */
3641         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3642                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3643                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3644 }
3645
3646 static int
3647 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3648 {
3649         u32 cmd;
3650         int j;
3651
3652         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3653                 /* Buffered flash, no erase needed */
3654                 return 0;
3655
3656         /* Build an erase command */
3657         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3658               BNX2_NVM_COMMAND_DOIT;
3659
3660         /* Need to clear DONE bit separately. */
3661         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3662
3663         /* Address of the NVRAM to read from. */
3664         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3665
3666         /* Issue an erase command. */
3667         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3668
3669         /* Wait for completion. */
3670         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3671                 u32 val;
3672
3673                 udelay(5);
3674
3675                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3676                 if (val & BNX2_NVM_COMMAND_DONE)
3677                         break;
3678         }
3679
3680         if (j >= NVRAM_TIMEOUT_COUNT)
3681                 return -EBUSY;
3682
3683         return 0;
3684 }
3685
3686 static int
3687 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3688 {
3689         u32 cmd;
3690         int j;
3691
3692         /* Build the command word. */
3693         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3694
3695         /* Calculate an offset of a buffered flash, not needed for 5709. */
3696         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3697                 offset = ((offset / bp->flash_info->page_size) <<
3698                            bp->flash_info->page_bits) +
3699                           (offset % bp->flash_info->page_size);
3700         }
3701
3702         /* Need to clear DONE bit separately. */
3703         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3704
3705         /* Address of the NVRAM to read from. */
3706         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3707
3708         /* Issue a read command. */
3709         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3710
3711         /* Wait for completion. */
3712         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3713                 u32 val;
3714
3715                 udelay(5);
3716
3717                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3718                 if (val & BNX2_NVM_COMMAND_DONE) {
3719                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3720                         memcpy(ret_val, &v, 4);
3721                         break;
3722                 }
3723         }
3724         if (j >= NVRAM_TIMEOUT_COUNT)
3725                 return -EBUSY;
3726
3727         return 0;
3728 }
3729
3730
3731 static int
3732 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3733 {
3734         u32 cmd;
3735         __be32 val32;
3736         int j;
3737
3738         /* Build the command word. */
3739         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3740
3741         /* Calculate an offset of a buffered flash, not needed for 5709. */
3742         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3743                 offset = ((offset / bp->flash_info->page_size) <<
3744                           bp->flash_info->page_bits) +
3745                          (offset % bp->flash_info->page_size);
3746         }
3747
3748         /* Need to clear DONE bit separately. */
3749         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3750
3751         memcpy(&val32, val, 4);
3752
3753         /* Write the data. */
3754         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3755
3756         /* Address of the NVRAM to write to. */
3757         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3758
3759         /* Issue the write command. */
3760         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3761
3762         /* Wait for completion. */
3763         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3764                 udelay(5);
3765
3766                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3767                         break;
3768         }
3769         if (j >= NVRAM_TIMEOUT_COUNT)
3770                 return -EBUSY;
3771
3772         return 0;
3773 }
3774
/* Identify the flash/EEPROM part behind the NVRAM interface, record it
 * in bp->flash_info, and determine the usable flash size.
 *
 * The 5709 has a fixed flash layout, so the strap-matching table scan
 * is skipped.  Otherwise the strapping bits in NVM_CFG1 are matched
 * against flash_table[]; if the interface has not yet been
 * reconfigured, this routine programs the CFG/WRITE1 registers itself
 * under the NVRAM arbitration lock.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* NOTE(review): bit 30 of NVM_CFG1 presumably means "interface
	 * already reconfigured by firmware" — confirm against the
	 * register definitions in bnx2.h. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to match against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above leave j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by shared firmware config; fall back
	 * to the table entry's total size. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3857
3858 static int
3859 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3860                 int buf_size)
3861 {
3862         int rc = 0;
3863         u32 cmd_flags, offset32, len32, extra;
3864
3865         if (buf_size == 0)
3866                 return 0;
3867
3868         /* Request access to the flash interface. */
3869         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3870                 return rc;
3871
3872         /* Enable access to flash interface */
3873         bnx2_enable_nvram_access(bp);
3874
3875         len32 = buf_size;
3876         offset32 = offset;
3877         extra = 0;
3878
3879         cmd_flags = 0;
3880
3881         if (offset32 & 3) {
3882                 u8 buf[4];
3883                 u32 pre_len;
3884
3885                 offset32 &= ~3;
3886                 pre_len = 4 - (offset & 3);
3887
3888                 if (pre_len >= len32) {
3889                         pre_len = len32;
3890                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3891                                     BNX2_NVM_COMMAND_LAST;
3892                 }
3893                 else {
3894                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3895                 }
3896
3897                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3898
3899                 if (rc)
3900                         return rc;
3901
3902                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3903
3904                 offset32 += 4;
3905                 ret_buf += pre_len;
3906                 len32 -= pre_len;
3907         }
3908         if (len32 & 3) {
3909                 extra = 4 - (len32 & 3);
3910                 len32 = (len32 + 4) & ~3;
3911         }
3912
3913         if (len32 == 4) {
3914                 u8 buf[4];
3915
3916                 if (cmd_flags)
3917                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3918                 else
3919                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3920                                     BNX2_NVM_COMMAND_LAST;
3921
3922                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3923
3924                 memcpy(ret_buf, buf, 4 - extra);
3925         }
3926         else if (len32 > 0) {
3927                 u8 buf[4];
3928
3929                 /* Read the first word. */
3930                 if (cmd_flags)
3931                         cmd_flags = 0;
3932                 else
3933                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3934
3935                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3936
3937                 /* Advance to the next dword. */
3938                 offset32 += 4;
3939                 ret_buf += 4;
3940                 len32 -= 4;
3941
3942                 while (len32 > 4 && rc == 0) {
3943                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3944
3945                         /* Advance to the next dword. */
3946                         offset32 += 4;
3947                         ret_buf += 4;
3948                         len32 -= 4;
3949                 }
3950
3951                 if (rc)
3952                         return rc;
3953
3954                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3955                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3956
3957                 memcpy(ret_buf, buf, 4 - extra);
3958         }
3959
3960         /* Disable access to flash interface */
3961         bnx2_disable_nvram_access(bp);
3962
3963         bnx2_release_nvram_lock(bp);
3964
3965         return rc;
3966 }
3967
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Handles arbitrary alignment by first reading the partial words at
 * the start and end of the range, merging them with the caller's data
 * in @align_buf, then writing page by page.  For non-buffered flash
 * each affected page is read into @flash_buffer, erased, and rewritten
 * with the merged contents.  The NVRAM lock and write-enable are taken
 * and dropped around every page.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range to a word boundary and read
	 * the existing leading bytes so they can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen to a word boundary and read the existing
	 * trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the preserved head/tail bytes with the caller's data
	 * into one word-aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a page-sized scratch buffer for the
	 * read-erase-rewrite cycle.  NOTE(review): 264 presumably covers
	 * the page size of every supported non-buffered part — confirm
	 * against flash_table[] page_size values. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Write one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4147
4148 static void
4149 bnx2_init_remote_phy(struct bnx2 *bp)
4150 {
4151         u32 val;
4152
4153         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4154         if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4155                 return;
4156
4157         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4158         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4159                 return;
4160
4161         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4162                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4163
4164                 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4165                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4166                         bp->phy_port = PORT_FIBRE;
4167                 else
4168                         bp->phy_port = PORT_TP;
4169
4170                 if (netif_running(bp->dev)) {
4171                         u32 sig;
4172
4173                         if (val & BNX2_LINK_STATUS_LINK_UP) {
4174                                 bp->link_up = 1;
4175                                 netif_carrier_on(bp->dev);
4176                         } else {
4177                                 bp->link_up = 0;
4178                                 netif_carrier_off(bp->dev);
4179                         }
4180                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4181                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4182                         bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4183                 }
4184         }
4185 }
4186
/* Program the GRC windows so the MSI-X table and PBA are reachable
 * through separate register windows.  Called after chip reset when the
 * device is using MSI-X (the reset clears this mapping).
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch the GRC window logic into separate-window mode. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	/* Window 2 -> MSI-X table, window 3 -> MSI-X PBA. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4195
/* Soft-reset the chip and wait for the firmware to become ready again.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware in the
 *	WAIT0/WAIT1 sync messages so it knows why the driver is
 *	resetting.
 *
 * Returns 0 on success, -EBUSY if the core reset never completes,
 * -ENODEV if the chip comes back in the wrong endian mode, or an error
 * propagated from the firmware sync / bad-rbuf workaround.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the posted write before delaying. */
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* The 5709 is reset through the MISC command register;
		 * the PCI config write below restores the window-enable
		 * and word-swap settings afterwards. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	/* If remote-PHY probing changed the port type, reprogram the
	 * default link settings for the new port. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* Reprogram the MSI-X window mapping after the reset. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4301
/* Bring a freshly-reset chip to an operational state: configure DMA,
 * initialize contexts and the on-chip CPUs, program MAC address/MTU,
 * set up the host status/statistics blocks and interrupt coalescing,
 * then enable the chip and tell the firmware initialization is done.
 *
 * Returns 0 on success or a negative error from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): unnamed DMA config bits — presumably burst/
	 * timer tuning; confirm against the chip manual before changing. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the enable-relaxed-ordering bit in PCI-X mode. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 need the halt-disable workaround bit. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff engine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the host status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Coalescing parameters: low 16 bits are the normal values,
	 * high 16 bits the during-interrupt values. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		/* Configure the per-vector status block for the TX vector. */
		u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware that initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	/* Read back to flush the posted write before the settle delay. */
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host coalescing command register value. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4502
4503 static void
4504 bnx2_clear_ring_states(struct bnx2 *bp)
4505 {
4506         struct bnx2_napi *bnapi;
4507         int i;
4508
4509         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4510                 bnapi = &bp->bnx2_napi[i];
4511
4512                 bnapi->tx_cons = 0;
4513                 bnapi->hw_tx_cons = 0;
4514                 bnapi->rx_prod_bseq = 0;
4515                 bnapi->rx_prod = 0;
4516                 bnapi->rx_cons = 0;
4517                 bnapi->rx_pg_prod = 0;
4518                 bnapi->rx_pg_cons = 0;
4519         }
4520 }
4521
/* Write the transmit L2 context for connection @cid: the context type,
 * the command type word, and the TX BD chain base address.  The 5709
 * (Xinan) uses a different set of context offsets than older chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	/* NOTE(review): (8 << 16) is an unnamed field in the command
	 * type word — presumably a BD count/threshold; confirm against
	 * the chip documentation before changing. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* High and low halves of the TX BD chain's DMA base address. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
4550
/* Initialize the software and hardware state of the TX ring.  With
 * MSI-X, TX completions are routed to a dedicated vector (BNX2_TX_VEC)
 * using the TSS cid.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;

	bp->tx_vec = 0;
	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cid = TX_TSS_CID;
		bp->tx_vec = BNX2_TX_VEC;
		/* Steer TX scheduler completions to the TX vector/cid. */
		REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
		       (TX_TSS_CID << 7));
	}
	bnapi = &bp->bnx2_napi[bp->tx_vec];

	/* Wake the queue once half the ring is free again. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last BD chains back to the ring base, closing the loop. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_prod_bseq = 0;

	/* Mailbox addresses used to ring the TX doorbell. */
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
4582
4583 static void
4584 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4585                      int num_rings)
4586 {
4587         int i;
4588         struct rx_bd *rxbd;
4589
4590         for (i = 0; i < num_rings; i++) {
4591                 int j;
4592
4593                 rxbd = &rx_ring[i][0];
4594                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4595                         rxbd->rx_bd_len = buf_size;
4596                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4597                 }
4598                 if (i == (num_rings - 1))
4599                         j = 0;
4600                 else
4601                         j = i + 1;
4602                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4603                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4604         }
4605 }
4606
/* Initialize the RX BD ring (and the optional page ring used for jumbo
 * frames), write the RX L2 context, pre-fill the rings with receive
 * buffers, and publish the initial producer indices to the chip
 * through the RX mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	int i;
	u16 prod, ring_prod;
	u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	/* Default: no page buffers (overwritten below if enabled). */
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Set up the page ring for jumbo frame payloads. */
		bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
				     bp->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY);

		/* Page BD chain base address. */
		val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
		CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* NOTE(review): 0x02 << 8 is an unnamed context-type field —
	 * confirm its meaning against the chip documentation. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	/* RX BD chain base address. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop quietly on allocation failure. */
	ring_prod = prod = bnapi->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	bnapi->rx_pg_prod = prod;

	/* Pre-fill the main RX ring with skbs. */
	ring_prod = prod = bnapi->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bnapi->rx_prod = prod;

	/* Publish the initial producer indices and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
		 bnapi->rx_pg_prod);
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
}
4674
4675 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4676 {
4677         u32 max, num_rings = 1;
4678
4679         while (ring_size > MAX_RX_DESC_CNT) {
4680                 ring_size -= MAX_RX_DESC_CNT;
4681                 num_rings++;
4682         }
4683         /* round to next power of 2 */
4684         max = max_size;
4685         while ((max & num_rings) == 0)
4686                 max >>= 1;
4687
4688         if (num_rings != max)
4689                 max <<= 1;
4690
4691         return max;
4692 }
4693
/* Derive all RX buffer and ring sizing fields from the requested ring
 * size and the current MTU.  When the required buffer footprint would
 * exceed one page (and jumbo paging works on this chip), a separate
 * page ring takes the payload overflow and the main ring buffers
 * shrink to header size.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

	/* Full skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* NOTE(review): the "- 40" presumably discounts bytes
		 * kept in the header buffer — confirm before changing. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Main ring buffers now only hold the packet header. */
		rx_size = RX_COPY_THRESH + bp->rx_offset;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4732
/* Release every skb still queued on the TX ring, DMA-unmapping the
 * head buffer and all fragment pages.  Called on the reset path with
 * the ring quiesced.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Each page fragment occupies one following BD slot. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Advance past the head BD and all its fragment BDs. */
		i += j + 1;
	}

}
4769
/* Release every receive buffer: unmap and free the skbs on the main
 * RX ring, then free the pages on the jumbo page ring.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		/* Slots without an skb were never filled (or already
		 * consumed). */
		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
	for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
		bnx2_free_rx_page(bp, i);
}
4795
/* Free all TX and RX buffers; used on the NIC reset path. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4802
4803 static int
4804 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4805 {
4806         int rc;
4807
4808         rc = bnx2_reset_chip(bp, reset_code);
4809         bnx2_free_skbs(bp);
4810         if (rc)
4811                 return rc;
4812
4813         if ((rc = bnx2_init_chip(bp)) != 0)
4814                 return rc;
4815
4816         bnx2_clear_ring_states(bp);
4817         bnx2_init_tx_ring(bp);
4818         bnx2_init_rx_ring(bp);
4819         return 0;
4820 }
4821
4822 static int
4823 bnx2_init_nic(struct bnx2 *bp)
4824 {
4825         int rc;
4826
4827         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4828                 return rc;
4829
4830         spin_lock_bh(&bp->phy_lock);
4831         bnx2_init_phy(bp);
4832         bnx2_set_link(bp);
4833         spin_unlock_bh(&bp->phy_lock);
4834         return 0;
4835 }
4836
4837 static int
4838 bnx2_test_registers(struct bnx2 *bp)
4839 {
4840         int ret;
4841         int i, is_5709;
4842         static const struct {
4843                 u16   offset;
4844                 u16   flags;
4845 #define BNX2_FL_NOT_5709        1
4846                 u32   rw_mask;
4847                 u32   ro_mask;
4848         } reg_tbl[] = {
4849                 { 0x006c, 0, 0x00000000, 0x0000003f },
4850                 { 0x0090, 0, 0xffffffff, 0x00000000 },
4851                 { 0x0094, 0, 0x00000000, 0x00000000 },
4852
4853                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4854                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4855                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4856                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4857                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4858                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4859                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4860                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4861                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4862
4863                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4864                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4865                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4866                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4867                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4868                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4869
4870                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4871                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4872                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4873
4874                 { 0x1000, 0, 0x00000000, 0x00000001 },
4875                 { 0x1004, 0, 0x00000000, 0x000f0001 },
4876
4877                 { 0x1408, 0, 0x01c00800, 0x00000000 },
4878                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4879                 { 0x14a8, 0, 0x00000000, 0x000001ff },
4880                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4881                 { 0x14b0, 0, 0x00000002, 0x00000001 },
4882                 { 0x14b8, 0, 0x00000000, 0x00000000 },
4883                 { 0x14c0, 0, 0x00000000, 0x00000009 },
4884                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4885                 { 0x14cc, 0, 0x00000000, 0x00000001 },
4886                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4887
4888                 { 0x1800, 0, 0x00000000, 0x00000001 },
4889                 { 0x1804, 0, 0x00000000, 0x00000003 },
4890
4891                 { 0x2800, 0, 0x00000000, 0x00000001 },
4892                 { 0x2804, 0, 0x00000000, 0x00003f01 },
4893                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4894                 { 0x2810, 0, 0xffff0000, 0x00000000 },
4895                 { 0x2814, 0, 0xffff0000, 0x00000000 },
4896                 { 0x2818, 0, 0xffff0000, 0x00000000 },
4897                 { 0x281c, 0, 0xffff0000, 0x00000000 },
4898                 { 0x2834, 0, 0xffffffff, 0x00000000 },
4899                 { 0x2840, 0, 0x00000000, 0xffffffff },
4900                 { 0x2844, 0, 0x00000000, 0xffffffff },
4901                 { 0x2848, 0, 0xffffffff, 0x00000000 },
4902                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4903
4904                 { 0x2c00, 0, 0x00000000, 0x00000011 },
4905                 { 0x2c04, 0, 0x00000000, 0x00030007 },
4906
4907                 { 0x3c00, 0, 0x00000000, 0x00000001 },
4908                 { 0x3c04, 0, 0x00000000, 0x00070000 },
4909                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4910                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4911                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4912                 { 0x3c14, 0, 0x00000000, 0xffffffff },
4913                 { 0x3c18, 0, 0x00000000, 0xffffffff },
4914                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4915                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4916
4917                 { 0x5004, 0, 0x00000000, 0x0000007f },
4918                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4919
4920                 { 0x5c00, 0, 0x00000000, 0x00000001 },
4921                 { 0x5c04, 0, 0x00000000, 0x0003000f },
4922                 { 0x5c08, 0, 0x00000003, 0x00000000 },
4923                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4924                 { 0x5c10, 0, 0x00000000, 0xffffffff },
4925                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4926                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4927                 { 0x5c88, 0, 0x00000000, 0x00077373 },
4928                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4929
4930                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4931                 { 0x680c, 0, 0xffffffff, 0x00000000 },
4932                 { 0x6810, 0, 0xffffffff, 0x00000000 },
4933                 { 0x6814, 0, 0xffffffff, 0x00000000 },
4934                 { 0x6818, 0, 0xffffffff, 0x00000000 },
4935                 { 0x681c, 0, 0xffffffff, 0x00000000 },
4936                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4937                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4938                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4939                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4940                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4941                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4942                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4943                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4944                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4945                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4946                 { 0x684c, 0, 0xffffffff, 0x00000000 },
4947                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4948                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4949                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4950                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4951                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4952                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4953
4954                 { 0xffff, 0, 0x00000000, 0x00000000 },
4955         };
4956
4957         ret = 0;
4958         is_5709 = 0;
4959         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4960                 is_5709 = 1;
4961
4962         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4963                 u32 offset, rw_mask, ro_mask, save_val, val;
4964                 u16 flags = reg_tbl[i].flags;
4965
4966                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4967                         continue;
4968
4969                 offset = (u32) reg_tbl[i].offset;
4970                 rw_mask = reg_tbl[i].rw_mask;
4971                 ro_mask = reg_tbl[i].ro_mask;
4972
4973                 save_val = readl(bp->regview + offset);
4974
4975                 writel(0, bp->regview + offset);
4976
4977                 val = readl(bp->regview + offset);
4978                 if ((val & rw_mask) != 0) {
4979                         goto reg_test_err;
4980                 }
4981
4982                 if ((val & ro_mask) != (save_val & ro_mask)) {
4983                         goto reg_test_err;
4984                 }
4985
4986                 writel(0xffffffff, bp->regview + offset);
4987
4988                 val = readl(bp->regview + offset);
4989                 if ((val & rw_mask) != rw_mask) {
4990                         goto reg_test_err;
4991                 }
4992
4993                 if ((val & ro_mask) != (save_val & ro_mask)) {
4994                         goto reg_test_err;
4995                 }
4996
4997                 writel(save_val, bp->regview + offset);
4998                 continue;
4999
5000 reg_test_err:
5001                 writel(save_val, bp->regview + offset);
5002                 ret = -ENODEV;
5003                 break;
5004         }
5005         return ret;
5006 }
5007
5008 static int
5009 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5010 {
5011         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5012                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5013         int i;
5014
5015         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5016                 u32 offset;
5017
5018                 for (offset = 0; offset < size; offset += 4) {
5019
5020                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5021
5022                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5023                                 test_pattern[i]) {
5024                                 return -ENODEV;
5025                         }
5026                 }
5027         }
5028         return 0;
5029 }
5030
5031 static int
5032 bnx2_test_memory(struct bnx2 *bp)
5033 {
5034         int ret = 0;
5035         int i;
5036         static struct mem_entry {
5037                 u32   offset;
5038                 u32   len;
5039         } mem_tbl_5706[] = {
5040                 { 0x60000,  0x4000 },
5041                 { 0xa0000,  0x3000 },
5042                 { 0xe0000,  0x4000 },
5043                 { 0x120000, 0x4000 },
5044                 { 0x1a0000, 0x4000 },
5045                 { 0x160000, 0x4000 },
5046                 { 0xffffffff, 0    },
5047         },
5048         mem_tbl_5709[] = {
5049                 { 0x60000,  0x4000 },
5050                 { 0xa0000,  0x3000 },
5051                 { 0xe0000,  0x4000 },
5052                 { 0x120000, 0x4000 },
5053                 { 0x1a0000, 0x4000 },
5054                 { 0xffffffff, 0    },
5055         };
5056         struct mem_entry *mem_tbl;
5057
5058         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5059                 mem_tbl = mem_tbl_5709;
5060         else
5061                 mem_tbl = mem_tbl_5706;
5062
5063         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5064                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5065                         mem_tbl[i].len)) != 0) {
5066                         return ret;
5067                 }
5068         }
5069
5070         return ret;
5071 }
5072
5073 #define BNX2_MAC_LOOPBACK       0
5074 #define BNX2_PHY_LOOPBACK       1
5075
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	/* Self-test helper: transmit one frame in MAC or PHY loopback mode
	 * and verify it is received back intact.
	 *
	 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if
	 * the test skb cannot be allocated, -ENODEV on any mismatch.
	 */
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;

	/* Under MSI-X, tx completions are tracked on a separate vector. */
	tx_napi = bnapi;
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* With a remote PHY we cannot drive PHY loopback locally;
		 * report success so the self-test does not fail spuriously.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC address in the first 6 bytes,
	 * 8 zeroed bytes, then a counting byte pattern from offset 14 on.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce (without raising an interrupt) so the status
	 * block indices are current before sampling the rx consumer.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single tx descriptor covering the whole frame. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the tx doorbell to start transmission. */
	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so the completion lands in the status block. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The tx engine must have consumed the frame ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
		goto loopback_test_done;

	/* ... and exactly one new frame must have arrived on rx. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The hardware l2 frame header precedes the payload at rx_offset. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the hardware flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus the 4-byte CRC) must match what we sent ... */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* ... and every payload byte must match the counting pattern. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5201
5202 #define BNX2_MAC_LOOPBACK_FAILED        1
5203 #define BNX2_PHY_LOOPBACK_FAILED        2
5204 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5205                                          BNX2_PHY_LOOPBACK_FAILED)
5206
5207 static int
5208 bnx2_test_loopback(struct bnx2 *bp)
5209 {
5210         int rc = 0;
5211
5212         if (!netif_running(bp->dev))
5213                 return BNX2_LOOPBACK_FAILED;
5214
5215         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5216         spin_lock_bh(&bp->phy_lock);
5217         bnx2_init_phy(bp);
5218         spin_unlock_bh(&bp->phy_lock);
5219         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5220                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5221         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5222                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5223         return rc;
5224 }
5225
5226 #define NVRAM_SIZE 0x200
5227 #define CRC32_RESIDUAL 0xdebb20e3
5228
5229 static int
5230 bnx2_test_nvram(struct bnx2 *bp)
5231 {
5232         __be32 buf[NVRAM_SIZE / 4];
5233         u8 *data = (u8 *) buf;
5234         int rc = 0;
5235         u32 magic, csum;
5236
5237         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5238                 goto test_nvram_done;
5239
5240         magic = be32_to_cpu(buf[0]);
5241         if (magic != 0x669955aa) {
5242                 rc = -ENODEV;
5243                 goto test_nvram_done;
5244         }
5245
5246         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5247                 goto test_nvram_done;
5248
5249         csum = ether_crc_le(0x100, data);
5250         if (csum != CRC32_RESIDUAL) {
5251                 rc = -ENODEV;
5252                 goto test_nvram_done;
5253         }
5254
5255         csum = ether_crc_le(0x100, data + 0x100);
5256         if (csum != CRC32_RESIDUAL) {
5257                 rc = -ENODEV;
5258         }
5259
5260 test_nvram_done:
5261         return rc;
5262 }
5263
5264 static int
5265 bnx2_test_link(struct bnx2 *bp)
5266 {
5267         u32 bmsr;
5268
5269         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5270                 if (bp->link_up)
5271                         return 0;
5272                 return -ENODEV;
5273         }
5274         spin_lock_bh(&bp->phy_lock);
5275         bnx2_enable_bmsr1(bp);
5276         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5277         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5278         bnx2_disable_bmsr1(bp);
5279         spin_unlock_bh(&bp->phy_lock);
5280
5281         if (bmsr & BMSR_LSTATUS) {
5282                 return 0;
5283         }
5284         return -ENODEV;
5285 }
5286
5287 static int
5288 bnx2_test_intr(struct bnx2 *bp)
5289 {
5290         int i;
5291         u16 status_idx;
5292
5293         if (!netif_running(bp->dev))
5294                 return -ENODEV;
5295
5296         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5297
5298         /* This register is not touched during run-time. */
5299         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5300         REG_RD(bp, BNX2_HC_COMMAND);
5301
5302         for (i = 0; i < 10; i++) {
5303                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5304                         status_idx) {
5305
5306                         break;
5307                 }
5308
5309                 msleep_interruptible(10);
5310         }
5311         if (i < 10)
5312                 return 0;
5313
5314         return -ENODEV;
5315 }
5316
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	/* Probe the 5706 SerDes shadow and DSP registers to decide whether
	 * a link partner is really present.  Returns 1 if the link looks
	 * viable, 0 otherwise.  Caller holds the phy lock.
	 */
	u32 mode_ctl, an_dbg, exp;

	/* No signal detected on the wire -> no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice; only the second value is acted on (the first read
	 * flushes any latched bits -- same pattern as the BMSR reads
	 * elsewhere in this driver).
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Out of sync or invalid RUDI ordered sets -> no usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5344
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	/* Per-tick link maintenance for the 5706 SerDes PHY: recovers a
	 * forced-down link, falls back to forced 1G (parallel detect)
	 * when autoneg finds no partner, and returns to autoneg when the
	 * partner starts negotiating.  Runs from timer context.
	 */
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	/* A link we forced down last tick gets released this tick. */
	if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
		bnx2_5706s_force_link_dn(bp, 0);
		bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		spin_unlock(&bp->phy_lock);
		return;
	}

	if (bp->serdes_an_pending) {
		/* Still waiting out an autoneg grace period. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		/* Autoneg has not produced a link: if the wire looks
		 * alive, force 1G full duplex (parallel detection).
		 */
		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link is up via parallel detect; re-enable autoneg once
		 * the partner negotiates.  NOTE(review): registers
		 * 0x17/0x15 appear to select/read a PHY expansion
		 * register and bit 0x20 presumably means the partner is
		 * autonegotiating -- confirm against the PHY datasheet.
		 */
		check_link = 0;
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	/* If the link is up under autoneg but has lost sync, force it
	 * down; the flag makes the next tick release it for a retry.
	 */
	if (bp->link_up && (bp->autoneg & AUTONEG_SPEED) && check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (val & MISC_SHDW_AN_DBG_NOSYNC) {
			bnx2_5706s_force_link_dn(bp, 1);
			bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
		}
	}
	spin_unlock(&bp->phy_lock);
}
5410
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Per-tick link maintenance for the 5708 SerDes PHY: while the
	 * link is down under autoneg, alternate between forced 2.5G mode
	 * and autoneg until one of them produces a link.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	/* Nothing to alternate to if the part is not 2.5G capable. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found no partner; try forced 2.5G with
			 * a shorter poll interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; back to autoneg and
			 * give it two ticks before re-evaluating.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5443
static void
bnx2_timer(unsigned long data)
{
	/* Periodic maintenance timer: firmware heartbeat, firmware rx-drop
	 * counter refresh, a 5708 stats workaround, and SerDes link
	 * polling.  Re-arms itself at bp->current_interval.
	 */
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the body while intr_sem is raised (e.g. during the reset
	 * task), but keep the timer ticking.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5475
5476 static int
5477 bnx2_request_irq(struct bnx2 *bp)
5478 {
5479         struct net_device *dev = bp->dev;
5480         unsigned long flags;
5481         struct bnx2_irq *irq;
5482         int rc = 0, i;
5483
5484         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5485                 flags = 0;
5486         else
5487                 flags = IRQF_SHARED;
5488
5489         for (i = 0; i < bp->irq_nvecs; i++) {
5490                 irq = &bp->irq_tbl[i];
5491                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5492                                  dev);
5493                 if (rc)
5494                         break;
5495                 irq->requested = 1;
5496         }
5497         return rc;
5498 }
5499
5500 static void
5501 bnx2_free_irq(struct bnx2 *bp)
5502 {
5503         struct net_device *dev = bp->dev;
5504         struct bnx2_irq *irq;
5505         int i;
5506
5507         for (i = 0; i < bp->irq_nvecs; i++) {
5508                 irq = &bp->irq_tbl[i];
5509                 if (irq->requested)
5510                         free_irq(irq->vector, dev);
5511                 irq->requested = 0;
5512         }
5513         if (bp->flags & BNX2_FLAG_USING_MSI)
5514                 pci_disable_msi(bp->pdev);
5515         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5516                 pci_disable_msix(bp->pdev);
5517
5518         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5519 }
5520
static void
bnx2_enable_msix(struct bnx2 *bp)
{
	/* Try to switch the device to MSI-X with one base vector and one
	 * tx vector.  On any failure the function returns silently and
	 * the caller stays on the default interrupt setup.
	 */
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];

	/* Program the chip-side MSI-X table/PBA location before asking
	 * the PCI core to enable MSI-X.
	 */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
	bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;

	/* Vector names become "<ifname>-base" and "<ifname>-tx".
	 * NOTE(review): strcpy/strcat assume irq_tbl[].name holds
	 * IFNAMSIZ plus the suffix -- verify the field size in bnx2.h.
	 */
	strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
	strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
	strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
	strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");

	bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->irq_tbl[i].vector = msix_ent[i].vector;
}
5554
5555 static void
5556 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5557 {
5558         bp->irq_tbl[0].handler = bnx2_interrupt;
5559         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5560         bp->irq_nvecs = 1;
5561         bp->irq_tbl[0].vector = bp->pdev->irq;
5562
5563         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5564                 bnx2_enable_msix(bp);
5565
5566         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5567             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5568                 if (pci_enable_msi(bp->pdev) == 0) {
5569                         bp->flags |= BNX2_FLAG_USING_MSI;
5570                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5571                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5572                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5573                         } else
5574                                 bp->irq_tbl[0].handler = bnx2_msi;
5575
5576                         bp->irq_tbl[0].vector = bp->pdev->irq;
5577                 }
5578         }
5579 }
5580
5581 /* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	/* ndo open handler (called with rtnl_lock): power the device up,
	 * allocate rings, pick an interrupt mode, initialize the NIC, and
	 * verify that MSI actually delivers interrupts (falling back to
	 * INTx if not).  Returns 0 or a negative errno with all partial
	 * setup unwound.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Select MSI-X/MSI/INTx, enable NAPI, then claim the vectors. */
	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1 forces legacy INTx on the retry. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_start_queue(dev);

	return 0;
}
5663
static void
bnx2_reset_task(struct work_struct *work)
{
	/* Deferred full reset, scheduled from bnx2_tx_timeout(): stop the
	 * netif, re-initialize the NIC, and restart.  Runs in process
	 * context via the shared workqueue.
	 */
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	/* in_reset_task flags other paths that a reset is in flight. */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* intr_sem is raised before the restart; presumably
	 * bnx2_netif_start() drops it when interrupts come back --
	 * NOTE(review): confirm against bnx2_netif_start.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5681
static void
bnx2_tx_timeout(struct net_device *dev)
{
	/* Netdev watchdog callback: defer the reset to process context
	 * via the reset_task work item (see bnx2_reset_task).
	 */
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5690
5691 #ifdef BCM_VLAN
5692 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	/* Install the VLAN group and reprogram the rx filters.  Traffic
	 * is quiesced around the update; called with rtnl_lock held.
	 */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
5705 #endif
5706
5707 /* Called with netif_tx_lock.
5708  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5709  * netif_wake_queue().
5710  */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Hard-start transmit (called with netif_tx_lock): map the skb,
	 * build the tx buffer descriptor chain (with checksum-offload,
	 * VLAN and TSO flags as applicable), and ring the doorbell.
	 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];

	/* The queue should have been stopped before the ring filled up;
	 * reaching this path indicates a flow-control bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	/* TSO: encode the MSS and header-length information into the bd
	 * flags and the mss field.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO: any TCP header offset beyond the fixed
			 * ipv6 header is split across several flag fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: the IP/TCP headers are modified below,
			 * so a cloned header block must be copied first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prime the checksum fields for the hardware. */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Extra header length (IP options + TCP options)
			 * goes into bits 8 and up of the flags.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First bd covers the linear part of the skb; the skb pointer is
	 * kept in the shadow ring for completion handling.
	 */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One bd per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last bd of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the tx doorbell with the new producer index/byte count. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-wake immediately if tx
	 * completions raced with us and freed enough room.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5847
5848 /* Called with rtnl_lock */
5849 static int
5850 bnx2_close(struct net_device *dev)
5851 {
5852         struct bnx2 *bp = netdev_priv(dev);
5853         u32 reset_code;
5854
5855         /* Calling flush_scheduled_work() may deadlock because
5856          * linkwatch_event() may be on the workqueue and it will try to get
5857          * the rtnl_lock which we are holding.
5858          */
5859         while (bp->in_reset_task)
5860                 msleep(1);
5861
5862         bnx2_disable_int_sync(bp);
5863         bnx2_napi_disable(bp);
5864         del_timer_sync(&bp->timer);
5865         if (bp->flags & BNX2_FLAG_NO_WOL)
5866                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5867         else if (bp->wol)
5868                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5869         else
5870                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5871         bnx2_reset_chip(bp, reset_code);
5872         bnx2_free_irq(bp);
5873         bnx2_free_skbs(bp);
5874         bnx2_free_mem(bp);
5875         bp->link_up = 0;
5876         netif_carrier_off(bp->dev);
5877         bnx2_set_power_state(bp, PCI_D3hot);
5878         return 0;
5879 }
5880
/* Combine the _hi/_lo halves of a 64-bit statistics counter into an
 * unsigned long.  Only used when unsigned long is 64 bits wide; on
 * 32-bit builds GET_NET_STATS32 reports just the low half.  The whole
 * expansion is parenthesized so the macro stays correct inside larger
 * expressions (e.g. when multiplied or shifted by the caller).
 */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) ((unsigned long) (ctr##_hi) << 32)) + \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5893
5894 static struct net_device_stats *
5895 bnx2_get_stats(struct net_device *dev)
5896 {
5897         struct bnx2 *bp = netdev_priv(dev);
5898         struct statistics_block *stats_blk = bp->stats_blk;
5899         struct net_device_stats *net_stats = &bp->net_stats;
5900
5901         if (bp->stats_blk == NULL) {
5902                 return net_stats;
5903         }
5904         net_stats->rx_packets =
5905                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5906                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5907                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5908
5909         net_stats->tx_packets =
5910                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5911                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5912                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5913
5914         net_stats->rx_bytes =
5915                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5916
5917         net_stats->tx_bytes =
5918                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5919
5920         net_stats->multicast =
5921                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5922
5923         net_stats->collisions =
5924                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5925
5926         net_stats->rx_length_errors =
5927                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5928                 stats_blk->stat_EtherStatsOverrsizePkts);
5929
5930         net_stats->rx_over_errors =
5931                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5932
5933         net_stats->rx_frame_errors =
5934                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5935
5936         net_stats->rx_crc_errors =
5937                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5938
5939         net_stats->rx_errors = net_stats->rx_length_errors +
5940                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5941                 net_stats->rx_crc_errors;
5942
5943         net_stats->tx_aborted_errors =
5944                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5945                 stats_blk->stat_Dot3StatsLateCollisions);
5946
5947         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5948             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5949                 net_stats->tx_carrier_errors = 0;
5950         else {
5951                 net_stats->tx_carrier_errors =
5952                         (unsigned long)
5953                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
5954         }
5955
5956         net_stats->tx_errors =
5957                 (unsigned long)
5958                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5959                 +
5960                 net_stats->tx_aborted_errors +
5961                 net_stats->tx_carrier_errors;
5962
5963         net_stats->rx_missed_errors =
5964                 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5965                 stats_blk->stat_FwRxDrop);
5966
5967         return net_stats;
5968 }
5969
5970 /* All ethtool functions called with rtnl_lock */
5971
5972 static int
5973 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5974 {
5975         struct bnx2 *bp = netdev_priv(dev);
5976         int support_serdes = 0, support_copper = 0;
5977
5978         cmd->supported = SUPPORTED_Autoneg;
5979         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5980                 support_serdes = 1;
5981                 support_copper = 1;
5982         } else if (bp->phy_port == PORT_FIBRE)
5983                 support_serdes = 1;
5984         else
5985                 support_copper = 1;
5986
5987         if (support_serdes) {
5988                 cmd->supported |= SUPPORTED_1000baseT_Full |
5989                         SUPPORTED_FIBRE;
5990                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
5991                         cmd->supported |= SUPPORTED_2500baseX_Full;
5992
5993         }
5994         if (support_copper) {
5995                 cmd->supported |= SUPPORTED_10baseT_Half |
5996                         SUPPORTED_10baseT_Full |
5997                         SUPPORTED_100baseT_Half |
5998                         SUPPORTED_100baseT_Full |
5999                         SUPPORTED_1000baseT_Full |
6000                         SUPPORTED_TP;
6001
6002         }
6003
6004         spin_lock_bh(&bp->phy_lock);
6005         cmd->port = bp->phy_port;
6006         cmd->advertising = bp->advertising;
6007
6008         if (bp->autoneg & AUTONEG_SPEED) {
6009                 cmd->autoneg = AUTONEG_ENABLE;
6010         }
6011         else {
6012                 cmd->autoneg = AUTONEG_DISABLE;
6013         }
6014
6015         if (netif_carrier_ok(dev)) {
6016                 cmd->speed = bp->line_speed;
6017                 cmd->duplex = bp->duplex;
6018         }
6019         else {
6020                 cmd->speed = -1;
6021                 cmd->duplex = -1;
6022         }
6023         spin_unlock_bh(&bp->phy_lock);
6024
6025         cmd->transceiver = XCVR_INTERNAL;
6026         cmd->phy_address = bp->phy_addr;
6027
6028         return 0;
6029 }
6030
/* ethtool set_settings handler.  Validates the requested port, autoneg,
 * advertising and forced speed/duplex combination against the hardware
 * capabilities, then commits the settings and re-runs PHY setup.
 * Returns 0 on success or -EINVAL for an unsupported combination.
 * Called with rtnl_lock held; takes the PHY lock internally.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching port types is only possible with a remote PHY. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 speeds are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G requires a capable PHY and a fibre port;
                         * the current advertising mask is kept.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* No single speed selected: advertise everything
                         * the chosen medium supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre only supports full duplex at 1G/2.5G. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                /* Gigabit speeds cannot be forced on copper. */
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* All checks passed: commit and apply. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6114
6115 static void
6116 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6117 {
6118         struct bnx2 *bp = netdev_priv(dev);
6119
6120         strcpy(info->driver, DRV_MODULE_NAME);
6121         strcpy(info->version, DRV_MODULE_VERSION);
6122         strcpy(info->bus_info, pci_name(bp->pdev));
6123         strcpy(info->fw_version, bp->fw_version);
6124 }
6125
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
        /* ethtool get_regs_len handler. */
        return BNX2_REGDUMP_LEN;
}
6133
6134 static void
6135 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6136 {
6137         u32 *p = _p, i, offset;
6138         u8 *orig_p = _p;
6139         struct bnx2 *bp = netdev_priv(dev);
6140         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6141                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6142                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6143                                  0x1040, 0x1048, 0x1080, 0x10a4,
6144                                  0x1400, 0x1490, 0x1498, 0x14f0,
6145                                  0x1500, 0x155c, 0x1580, 0x15dc,
6146                                  0x1600, 0x1658, 0x1680, 0x16d8,
6147                                  0x1800, 0x1820, 0x1840, 0x1854,
6148                                  0x1880, 0x1894, 0x1900, 0x1984,
6149                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6150                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6151                                  0x2000, 0x2030, 0x23c0, 0x2400,
6152                                  0x2800, 0x2820, 0x2830, 0x2850,
6153                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6154                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6155                                  0x4080, 0x4090, 0x43c0, 0x4458,
6156                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6157                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6158                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6159                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6160                                  0x6800, 0x6848, 0x684c, 0x6860,
6161                                  0x6888, 0x6910, 0x8000 };
6162
6163         regs->version = 0;
6164
6165         memset(p, 0, BNX2_REGDUMP_LEN);
6166
6167         if (!netif_running(bp->dev))
6168                 return;
6169
6170         i = 0;
6171         offset = reg_boundaries[0];
6172         p += offset;
6173         while (offset < BNX2_REGDUMP_LEN) {
6174                 *p++ = REG_RD(bp, offset);
6175                 offset += 4;
6176                 if (offset == reg_boundaries[i + 1]) {
6177                         offset = reg_boundaries[i + 2];
6178                         p = (u32 *) (orig_p + offset);
6179                         i += 2;
6180                 }
6181         }
6182 }
6183
6184 static void
6185 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6186 {
6187         struct bnx2 *bp = netdev_priv(dev);
6188
6189         if (bp->flags & BNX2_FLAG_NO_WOL) {
6190                 wol->supported = 0;
6191                 wol->wolopts = 0;
6192         }
6193         else {
6194                 wol->supported = WAKE_MAGIC;
6195                 if (bp->wol)
6196                         wol->wolopts = WAKE_MAGIC;
6197                 else
6198                         wol->wolopts = 0;
6199         }
6200         memset(&wol->sopass, 0, sizeof(wol->sopass));
6201 }
6202
6203 static int
6204 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6205 {
6206         struct bnx2 *bp = netdev_priv(dev);
6207
6208         if (wol->wolopts & ~WAKE_MAGIC)
6209                 return -EINVAL;
6210
6211         if (wol->wolopts & WAKE_MAGIC) {
6212                 if (bp->flags & BNX2_FLAG_NO_WOL)
6213                         return -EINVAL;
6214
6215                 bp->wol = 1;
6216         }
6217         else {
6218                 bp->wol = 0;
6219         }
6220         return 0;
6221 }
6222
/* ethtool nway_reset handler: restart autonegotiation.  Only valid when
 * speed autoneg is enabled.  Called with rtnl_lock held.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* With a remote PHY, restarting AN is delegated to the firmware
         * via the remote-PHY setup path.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; msleep() cannot be called
                 * in the BH-disabled critical section.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the SerDes AN timeout so the timer can fall back
                 * if autonegotiation does not complete.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6265
6266 static int
6267 bnx2_get_eeprom_len(struct net_device *dev)
6268 {
6269         struct bnx2 *bp = netdev_priv(dev);
6270
6271         if (bp->flash_info == NULL)
6272                 return 0;
6273
6274         return (int) bp->flash_size;
6275 }
6276
6277 static int
6278 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6279                 u8 *eebuf)
6280 {
6281         struct bnx2 *bp = netdev_priv(dev);
6282         int rc;
6283
6284         /* parameters already validated in ethtool_get_eeprom */
6285
6286         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6287
6288         return rc;
6289 }
6290
6291 static int
6292 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6293                 u8 *eebuf)
6294 {
6295         struct bnx2 *bp = netdev_priv(dev);
6296         int rc;
6297
6298         /* parameters already validated in ethtool_set_eeprom */
6299
6300         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6301
6302         return rc;
6303 }
6304
6305 static int
6306 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6307 {
6308         struct bnx2 *bp = netdev_priv(dev);
6309
6310         memset(coal, 0, sizeof(struct ethtool_coalesce));
6311
6312         coal->rx_coalesce_usecs = bp->rx_ticks;
6313         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6314         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6315         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6316
6317         coal->tx_coalesce_usecs = bp->tx_ticks;
6318         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6319         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6320         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6321
6322         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6323
6324         return 0;
6325 }
6326
6327 static int
6328 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6329 {
6330         struct bnx2 *bp = netdev_priv(dev);
6331
6332         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6333         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6334
6335         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6336         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6337
6338         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6339         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6340
6341         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6342         if (bp->rx_quick_cons_trip_int > 0xff)
6343                 bp->rx_quick_cons_trip_int = 0xff;
6344
6345         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6346         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6347
6348         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6349         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6350
6351         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6352         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6353
6354         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6355         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6356                 0xff;
6357
6358         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6359         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6360                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6361                         bp->stats_ticks = USEC_PER_SEC;
6362         }
6363         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6364                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6365         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6366
6367         if (netif_running(bp->dev)) {
6368                 bnx2_netif_stop(bp);
6369                 bnx2_init_nic(bp);
6370                 bnx2_netif_start(bp);
6371         }
6372
6373         return 0;
6374 }
6375
6376 static void
6377 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6378 {
6379         struct bnx2 *bp = netdev_priv(dev);
6380
6381         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6382         ering->rx_mini_max_pending = 0;
6383         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6384
6385         ering->rx_pending = bp->rx_ring_size;
6386         ering->rx_mini_pending = 0;
6387         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6388
6389         ering->tx_max_pending = MAX_TX_DESC_CNT;
6390         ering->tx_pending = bp->tx_ring_size;
6391 }
6392
/* Resize the RX and TX rings.  If the interface is up, it is quiesced
 * and its ring memory freed first, then reallocated and restarted with
 * the new sizes.  Returns 0 or a negative errno from reallocation.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                /* Tear down everything that depends on the current sizes. */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                /* NOTE(review): if this allocation fails the device is
                 * left stopped with its rings freed while still marked
                 * running — verify that callers/users can recover (e.g.
                 * via a subsequent close or reset).
                 */
                rc = bnx2_alloc_mem(bp);
                if (rc)
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }
        return 0;
}
6417
6418 static int
6419 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6420 {
6421         struct bnx2 *bp = netdev_priv(dev);
6422         int rc;
6423
6424         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6425                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6426                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6427
6428                 return -EINVAL;
6429         }
6430         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6431         return rc;
6432 }
6433
6434 static void
6435 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6436 {
6437         struct bnx2 *bp = netdev_priv(dev);
6438
6439         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6440         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6441         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6442 }
6443
6444 static int
6445 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6446 {
6447         struct bnx2 *bp = netdev_priv(dev);
6448
6449         bp->req_flow_ctrl = 0;
6450         if (epause->rx_pause)
6451                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6452         if (epause->tx_pause)
6453                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6454
6455         if (epause->autoneg) {
6456                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6457         }
6458         else {
6459                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6460         }
6461
6462         spin_lock_bh(&bp->phy_lock);
6463
6464         bnx2_setup_phy(bp, bp->phy_port);
6465
6466         spin_unlock_bh(&bp->phy_lock);
6467
6468         return 0;
6469 }
6470
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* ethtool get_rx_csum handler: nonzero when RX checksum offload
         * is enabled.
         */
        return bp->rx_csum;
}
6478
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* ethtool set_rx_csum handler: record the new setting; it is
         * consulted on the RX path.  Always succeeds.
         */
        bp->rx_csum = data;
        return 0;
}
6487
6488 static int
6489 bnx2_set_tso(struct net_device *dev, u32 data)
6490 {
6491         struct bnx2 *bp = netdev_priv(dev);
6492
6493         if (data) {
6494                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6495                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6496                         dev->features |= NETIF_F_TSO6;
6497         } else
6498                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6499                                    NETIF_F_TSO_ECN);
6500         return 0;
6501 }
6502
#define BNX2_NUM_STATS 46

/* ethtool statistics names; the order must match bnx2_stats_offset_arr
 * and the per-chip stats length tables below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
6555
/* Offset of a statistics_block field, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter within the statistics block, one entry
 * per BNX2_NUM_STATS statistic in bnx2_stats_str_arr order.  For 64-bit
 * counters the offset of the _hi word is stored.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6606
/* Byte width of each counter, one entry per BNX2_NUM_STATS statistic.
 * A width of 0 means the counter is skipped because of errata: on
 * 5706-class chips both stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped; the 5708 table skips
 * only stat_IfHCInBadOctets.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6625
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; indices match the buf[] result slots filled
 * in by bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6638
6639 static int
6640 bnx2_get_sset_count(struct net_device *dev, int sset)
6641 {
6642         switch (sset) {
6643         case ETH_SS_TEST:
6644                 return BNX2_NUM_TESTS;
6645         case ETH_SS_STATS:
6646                 return BNX2_NUM_STATS;
6647         default:
6648                 return -EOPNOTSUPP;
6649         }
6650 }
6651
/* ethtool self-test handler.  Results go in buf[0..5] (nonzero means
 * failed), matching bnx2_tests_str_arr: registers, memory, loopback
 * (offline only), then nvram, interrupt, and link (always run).
 * Offline tests reset the chip, so the NIC is reinitialized afterwards
 * if the interface is up.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Quiesce and put the chip in diagnostic mode. */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* The loopback test returns a nonzero failure mask. */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait for link up (at most ~7 seconds) */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6707
6708 static void
6709 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6710 {
6711         switch (stringset) {
6712         case ETH_SS_STATS:
6713                 memcpy(buf, bnx2_stats_str_arr,
6714                         sizeof(bnx2_stats_str_arr));
6715                 break;
6716         case ETH_SS_TEST:
6717                 memcpy(buf, bnx2_tests_str_arr,
6718                         sizeof(bnx2_tests_str_arr));
6719                 break;
6720         }
6721 }
6722
/* ethtool get_ethtool_stats: copy the DMA'ed hardware statistics block
 * into the u64 buffer expected by ethtool.  Counter widths come from a
 * per-chip length table: 0 means the counter is unusable on this chip
 * (see the errata note above bnx2_5706_stats_len_arr), 4 is a single
 * 32-bit word, and 8 is a 64-bit counter stored as {hi, lo} words.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions need the table with extra skipped counters. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: high word first, then low word. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
6763
6764 static int
6765 bnx2_phys_id(struct net_device *dev, u32 data)
6766 {
6767         struct bnx2 *bp = netdev_priv(dev);
6768         int i;
6769         u32 save;
6770
6771         if (data == 0)
6772                 data = 2;
6773
6774         save = REG_RD(bp, BNX2_MISC_CFG);
6775         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6776
6777         for (i = 0; i < (data * 2); i++) {
6778                 if ((i % 2) == 0) {
6779                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6780                 }
6781                 else {
6782                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6783                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
6784                                 BNX2_EMAC_LED_100MB_OVERRIDE |
6785                                 BNX2_EMAC_LED_10MB_OVERRIDE |
6786                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6787                                 BNX2_EMAC_LED_TRAFFIC);
6788                 }
6789                 msleep_interruptible(500);
6790                 if (signal_pending(current))
6791                         break;
6792         }
6793         REG_WR(bp, BNX2_EMAC_LED, 0);
6794         REG_WR(bp, BNX2_MISC_CFG, save);
6795         return 0;
6796 }
6797
6798 static int
6799 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6800 {
6801         struct bnx2 *bp = netdev_priv(dev);
6802
6803         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6804                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6805         else
6806                 return (ethtool_op_set_tx_csum(dev, data));
6807 }
6808
/* ethtool entry points, installed on dev->ethtool_ops in bnx2_init_one(). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6839
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register access is serialized with bp->phy_lock and is refused
 * while the PHY is owned by remote (management) firmware.
 */
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		/* The MDIO interface needs the device to be up. */
		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writes are privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6893
6894 /* Called with rtnl_lock */
6895 static int
6896 bnx2_change_mac_addr(struct net_device *dev, void *p)
6897 {
6898         struct sockaddr *addr = p;
6899         struct bnx2 *bp = netdev_priv(dev);
6900
6901         if (!is_valid_ether_addr(addr->sa_data))
6902                 return -EINVAL;
6903
6904         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6905         if (netif_running(dev))
6906                 bnx2_set_mac_addr(bp);
6907
6908         return 0;
6909 }
6910
6911 /* Called with rtnl_lock */
6912 static int
6913 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6914 {
6915         struct bnx2 *bp = netdev_priv(dev);
6916
6917         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6918                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6919                 return -EINVAL;
6920
6921         dev->mtu = new_mtu;
6922         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6923 }
6924
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: run the interrupt handler synchronously with the
 * device IRQ masked, so the stack can make progress when normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6936
6937 static void __devinit
6938 bnx2_get_5709_media(struct bnx2 *bp)
6939 {
6940         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6941         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6942         u32 strap;
6943
6944         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6945                 return;
6946         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6947                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6948                 return;
6949         }
6950
6951         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6952                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6953         else
6954                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6955
6956         if (PCI_FUNC(bp->pdev->devfn) == 0) {
6957                 switch (strap) {
6958                 case 0x4:
6959                 case 0x5:
6960                 case 0x6:
6961                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6962                         return;
6963                 }
6964         } else {
6965                 switch (strap) {
6966                 case 0x1:
6967                 case 0x2:
6968                 case 0x4:
6969                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
6970                         return;
6971                 }
6972         }
6973 }
6974
/* Read the bus mode (PCI vs. PCI-X), clock speed and width from the
 * chip's own status registers and record them in bp->flags and
 * bp->bus_speed_mhz (reported later via bnx2_bus_string()).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: decode the detected clock speed field. */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs. 66 MHz, from M66EN. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7026
7027 static int __devinit
7028 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7029 {
7030         struct bnx2 *bp;
7031         unsigned long mem_len;
7032         int rc, i, j;
7033         u32 reg;
7034         u64 dma_mask, persist_dma_mask;
7035
7036         SET_NETDEV_DEV(dev, &pdev->dev);
7037         bp = netdev_priv(dev);
7038
7039         bp->flags = 0;
7040         bp->phy_flags = 0;
7041
7042         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7043         rc = pci_enable_device(pdev);
7044         if (rc) {
7045                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7046                 goto err_out;
7047         }
7048
7049         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7050                 dev_err(&pdev->dev,
7051                         "Cannot find PCI device base address, aborting.\n");
7052                 rc = -ENODEV;
7053                 goto err_out_disable;
7054         }
7055
7056         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7057         if (rc) {
7058                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7059                 goto err_out_disable;
7060         }
7061
7062         pci_set_master(pdev);
7063
7064         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7065         if (bp->pm_cap == 0) {
7066                 dev_err(&pdev->dev,
7067                         "Cannot find power management capability, aborting.\n");
7068                 rc = -EIO;
7069                 goto err_out_release;
7070         }
7071
7072         bp->dev = dev;
7073         bp->pdev = pdev;
7074
7075         spin_lock_init(&bp->phy_lock);
7076         spin_lock_init(&bp->indirect_lock);
7077         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7078
7079         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7080         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7081         dev->mem_end = dev->mem_start + mem_len;
7082         dev->irq = pdev->irq;
7083
7084         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7085
7086         if (!bp->regview) {
7087                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7088                 rc = -ENOMEM;
7089                 goto err_out_release;
7090         }
7091
7092         /* Configure byte swap and enable write to the reg_window registers.
7093          * Rely on CPU to do target byte swapping on big endian systems
7094          * The chip's target access swapping will not swap all accesses
7095          */
7096         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7097                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7098                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7099
7100         bnx2_set_power_state(bp, PCI_D0);
7101
7102         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7103
7104         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7105                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7106                         dev_err(&pdev->dev,
7107                                 "Cannot find PCIE capability, aborting.\n");
7108                         rc = -EIO;
7109                         goto err_out_unmap;
7110                 }
7111                 bp->flags |= BNX2_FLAG_PCIE;
7112                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7113                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7114         } else {
7115                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7116                 if (bp->pcix_cap == 0) {
7117                         dev_err(&pdev->dev,
7118                                 "Cannot find PCIX capability, aborting.\n");
7119                         rc = -EIO;
7120                         goto err_out_unmap;
7121                 }
7122         }
7123
7124         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7125                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7126                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7127         }
7128
7129         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7130                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7131                         bp->flags |= BNX2_FLAG_MSI_CAP;
7132         }
7133
7134         /* 5708 cannot support DMA addresses > 40-bit.  */
7135         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7136                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7137         else
7138                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7139
7140         /* Configure DMA attributes. */
7141         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7142                 dev->features |= NETIF_F_HIGHDMA;
7143                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7144                 if (rc) {
7145                         dev_err(&pdev->dev,
7146                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7147                         goto err_out_unmap;
7148                 }
7149         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7150                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7151                 goto err_out_unmap;
7152         }
7153
7154         if (!(bp->flags & BNX2_FLAG_PCIE))
7155                 bnx2_get_pci_speed(bp);
7156
7157         /* 5706A0 may falsely detect SERR and PERR. */
7158         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7159                 reg = REG_RD(bp, PCI_COMMAND);
7160                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7161                 REG_WR(bp, PCI_COMMAND, reg);
7162         }
7163         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7164                 !(bp->flags & BNX2_FLAG_PCIX)) {
7165
7166                 dev_err(&pdev->dev,
7167                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7168                 goto err_out_unmap;
7169         }
7170
7171         bnx2_init_nvram(bp);
7172
7173         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7174
7175         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7176             BNX2_SHM_HDR_SIGNATURE_SIG) {
7177                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7178
7179                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7180         } else
7181                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7182
7183         /* Get the permanent MAC address.  First we need to make sure the
7184          * firmware is actually running.
7185          */
7186         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7187
7188         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7189             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7190                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7191                 rc = -ENODEV;
7192                 goto err_out_unmap;
7193         }
7194
7195         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7196         for (i = 0, j = 0; i < 3; i++) {
7197                 u8 num, k, skip0;
7198
7199                 num = (u8) (reg >> (24 - (i * 8)));
7200                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7201                         if (num >= k || !skip0 || k == 1) {
7202                                 bp->fw_version[j++] = (num / k) + '0';
7203                                 skip0 = 0;
7204                         }
7205                 }
7206                 if (i != 2)
7207                         bp->fw_version[j++] = '.';
7208         }
7209         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7210         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7211                 bp->wol = 1;
7212
7213         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7214                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7215
7216                 for (i = 0; i < 30; i++) {
7217                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7218                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7219                                 break;
7220                         msleep(10);
7221                 }
7222         }
7223         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7224         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7225         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7226             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7227                 int i;
7228                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7229
7230                 bp->fw_version[j++] = ' ';
7231                 for (i = 0; i < 3; i++) {
7232                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7233                         reg = swab32(reg);
7234                         memcpy(&bp->fw_version[j], &reg, 4);
7235                         j += 4;
7236                 }
7237         }
7238
7239         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7240         bp->mac_addr[0] = (u8) (reg >> 8);
7241         bp->mac_addr[1] = (u8) reg;
7242
7243         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7244         bp->mac_addr[2] = (u8) (reg >> 24);
7245         bp->mac_addr[3] = (u8) (reg >> 16);
7246         bp->mac_addr[4] = (u8) (reg >> 8);
7247         bp->mac_addr[5] = (u8) reg;
7248
7249         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7250
7251         bp->tx_ring_size = MAX_TX_DESC_CNT;
7252         bnx2_set_rx_ring_size(bp, 255);
7253
7254         bp->rx_csum = 1;
7255
7256         bp->tx_quick_cons_trip_int = 20;
7257         bp->tx_quick_cons_trip = 20;
7258         bp->tx_ticks_int = 80;
7259         bp->tx_ticks = 80;
7260
7261         bp->rx_quick_cons_trip_int = 6;
7262         bp->rx_quick_cons_trip = 6;
7263         bp->rx_ticks_int = 18;
7264         bp->rx_ticks = 18;
7265
7266         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7267
7268         bp->timer_interval =  HZ;
7269         bp->current_interval =  HZ;
7270
7271         bp->phy_addr = 1;
7272
7273         /* Disable WOL support if we are running on a SERDES chip. */
7274         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7275                 bnx2_get_5709_media(bp);
7276         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7277                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7278
7279         bp->phy_port = PORT_TP;
7280         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7281                 bp->phy_port = PORT_FIBRE;
7282                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7283                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7284                         bp->flags |= BNX2_FLAG_NO_WOL;
7285                         bp->wol = 0;
7286                 }
7287                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
7288                         bp->phy_addr = 2;
7289                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7290                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7291                 }
7292                 bnx2_init_remote_phy(bp);
7293
7294         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7295                    CHIP_NUM(bp) == CHIP_NUM_5708)
7296                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7297         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7298                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7299                   CHIP_REV(bp) == CHIP_REV_Bx))
7300                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7301
7302         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7303             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7304             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7305                 bp->flags |= BNX2_FLAG_NO_WOL;
7306                 bp->wol = 0;
7307         }
7308
7309         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7310                 bp->tx_quick_cons_trip_int =
7311                         bp->tx_quick_cons_trip;
7312                 bp->tx_ticks_int = bp->tx_ticks;
7313                 bp->rx_quick_cons_trip_int =
7314                         bp->rx_quick_cons_trip;
7315                 bp->rx_ticks_int = bp->rx_ticks;
7316                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7317                 bp->com_ticks_int = bp->com_ticks;
7318                 bp->cmd_ticks_int = bp->cmd_ticks;
7319         }
7320
7321         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7322          *
7323          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7324          * with byte enables disabled on the unused 32-bit word.  This is legal
7325          * but causes problems on the AMD 8132 which will eventually stop
7326          * responding after a while.
7327          *
7328          * AMD believes this incompatibility is unique to the 5706, and
7329          * prefers to locally disable MSI rather than globally disabling it.
7330          */
7331         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7332                 struct pci_dev *amd_8132 = NULL;
7333
7334                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7335                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7336                                                   amd_8132))) {
7337
7338                         if (amd_8132->revision >= 0x10 &&
7339                             amd_8132->revision <= 0x13) {
7340                                 disable_msi = 1;
7341                                 pci_dev_put(amd_8132);
7342                                 break;
7343                         }
7344                 }
7345         }
7346
7347         bnx2_set_default_link(bp);
7348         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7349
7350         init_timer(&bp->timer);
7351         bp->timer.expires = RUN_AT(bp->timer_interval);
7352         bp->timer.data = (unsigned long) bp;
7353         bp->timer.function = bnx2_timer;
7354
7355         return 0;
7356
7357 err_out_unmap:
7358         if (bp->regview) {
7359                 iounmap(bp->regview);
7360                 bp->regview = NULL;
7361         }
7362
7363 err_out_release:
7364         pci_release_regions(pdev);
7365
7366 err_out_disable:
7367         pci_disable_device(pdev);
7368         pci_set_drvdata(pdev, NULL);
7369
7370 err_out:
7371         return rc;
7372 }
7373
7374 static char * __devinit
7375 bnx2_bus_string(struct bnx2 *bp, char *str)
7376 {
7377         char *s = str;
7378
7379         if (bp->flags & BNX2_FLAG_PCIE) {
7380                 s += sprintf(s, "PCI Express");
7381         } else {
7382                 s += sprintf(s, "PCI");
7383                 if (bp->flags & BNX2_FLAG_PCIX)
7384                         s += sprintf(s, "-X");
7385                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7386                         s += sprintf(s, " 32-bit");
7387                 else
7388                         s += sprintf(s, " 64-bit");
7389                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7390         }
7391         return str;
7392 }
7393
/* Set the driver back-pointer on every per-vector NAPI context and
 * register the two NAPI poll routines with the stack: slot 0 uses
 * bnx2_poll and the BNX2_TX_VEC slot uses bnx2_tx_poll, each with a
 * budget weight of 64.
 */
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		bnapi->bp = bp;
	}
	netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
	netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
		       64);
}
7408
/* PCI probe entry point: allocate the net_device, run the board
 * initialization, wire up the net_device operations and feature flags,
 * and register the interface with the network stack.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board() cleaned up after itself. */
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* Permanent MAC address was read from shared memory by
	 * bnx2_init_board().
	 */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum offload; the 5709 can also do IPv6 checksums. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Undo bnx2_init_board() by hand; it only cleans up on
		 * its own failure paths.
		 */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7496
/* PCI remove callback: detach the interface and release every resource
 * acquired by bnx2_init_one().  Teardown runs in the reverse order of
 * acquisition; note bp is embedded in dev, so nothing may touch bp
 * after free_netdev(). */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2 *bp = netdev_priv(dev);

        /* Drain any queued reset/link work items; they reference bp. */
        flush_scheduled_work();

        unregister_netdev(dev);

        /* Unmap the BAR register window if probe mapped it. */
        if (bp->regview)
                iounmap(bp->regview);

        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
7515
7516 static int
7517 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7518 {
7519         struct net_device *dev = pci_get_drvdata(pdev);
7520         struct bnx2 *bp = netdev_priv(dev);
7521         u32 reset_code;
7522
7523         /* PCI register 4 needs to be saved whether netif_running() or not.
7524          * MSI address and data need to be saved if using MSI and
7525          * netif_running().
7526          */
7527         pci_save_state(pdev);
7528         if (!netif_running(dev))
7529                 return 0;
7530
7531         flush_scheduled_work();
7532         bnx2_netif_stop(bp);
7533         netif_device_detach(dev);
7534         del_timer_sync(&bp->timer);
7535         if (bp->flags & BNX2_FLAG_NO_WOL)
7536                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7537         else if (bp->wol)
7538                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7539         else
7540                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7541         bnx2_reset_chip(bp, reset_code);
7542         bnx2_free_skbs(bp);
7543         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7544         return 0;
7545 }
7546
7547 static int
7548 bnx2_resume(struct pci_dev *pdev)
7549 {
7550         struct net_device *dev = pci_get_drvdata(pdev);
7551         struct bnx2 *bp = netdev_priv(dev);
7552
7553         pci_restore_state(pdev);
7554         if (!netif_running(dev))
7555                 return 0;
7556
7557         bnx2_set_power_state(bp, PCI_D0);
7558         netif_device_attach(dev);
7559         bnx2_init_nic(bp);
7560         bnx2_netif_start(bp);
7561         return 0;
7562 }
7563
/* Driver registration table handed to the PCI core: device-ID match
 * list plus the probe/remove and power-management entry points. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
};
7572
7573 static int __init bnx2_init(void)
7574 {
7575         return pci_register_driver(&bnx2_pci_driver);
7576 }
7577
/* Module exit point: unhook the driver; the PCI core invokes
 * bnx2_remove_one() for every still-bound device. */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
7582
/* Wire the load/unload handlers into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7585
7586
7587