linux-2.6 git blob: drivers/net/bnx2.c
Commit: [BNX2]: Restructure RX fast path handling.
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
55 #define FW_BUF_SIZE             0x8000
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.6.9"
60 #define DRV_MODULE_RELDATE      "December 8, 2007"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types recognized by this driver.  The value is used as the
 * driver_data index into board_info[] below, so the two must stay in
 * the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
92 /* indexed by board_t, above */
static const struct {
	char *name;	/* marketing name printed at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI ID table.  The HP-branded entries (specific subsystem vendor/ID)
 * must come before the PCI_ANY_ID catch-all entries for the same device
 * ID so that they match first.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* NVRAM device table.  The first five words of each entry are raw
 * hardware command/configuration values (strapping match word followed
 * by NVRAM controller config and access command words — exact field
 * meanings are defined by struct flash_spec in bnx2.h; confirm there
 * before changing any value).  The remaining fields are the
 * buffered/non-buffered flags, page geometry, byte address mask,
 * total size in bytes, and a human-readable name.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM configuration, so it bypasses the
 * strapping-based flash_table lookup and uses this entry directly.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
226
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bp->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
/* Read a register through the PCI config window (indirect access).
 * The window address must be programmed immediately before the data
 * read; indirect_lock keeps the address/data pair atomic with respect
 * to other indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
258
/* Write a register through the PCI config window (indirect access).
 * See bnx2_reg_rd_ind(); the lock protects the shared address/data
 * register pair.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register 'reg' over MDIO into *val.
 * Returns 0 on success or -EBUSY if the transaction does not complete
 * (in which case *val is set to 0).  If hardware autopolling is active
 * it is disabled for the duration of the access, since it shares the
 * MDIO interface.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to flush the posted write before delaying. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the read transaction for (phy_addr, reg). */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Transaction never completed. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore autopolling if we turned it off above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
349
/* Write 'val' to PHY register 'reg' over MDIO.
 * Returns 0 on success or -EBUSY on timeout.  As in bnx2_read_phy(),
 * hardware autopolling is suspended around the access because it
 * shares the MDIO interface.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to flush the posted write before delaying. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the write transaction for (phy_addr, reg, val). */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore autopolling if we turned it off above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
/* Mask chip interrupt generation.  The read-back flushes the posted
 * write so the mask is guaranteed to have reached the chip on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Re-enable chip interrupts.
 * The first write acknowledges last_status_idx while keeping MASK_INT
 * set; the second write, without MASK_INT, unmasks.  The final write
 * sets COAL_NOW, presumably to force an immediate host-coalescing pass
 * so events that arrived while masked still raise an interrupt --
 * NOTE(review): confirm against the NetXtreme II programming guide.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Mask chip interrupts and wait for any in-flight handler to finish.
 * intr_sem is raised first so that bnx2_netif_start() will not
 * re-enable interrupts until the matching decrement.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: interrupts off and synchronized first, then
 * NAPI polling and the TX queue are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
438
/* Undo one bnx2_netif_stop().  intr_sem balances the increment in
 * bnx2_disable_int_sync(); only the call that brings it back to zero
 * actually restarts the queue, NAPI, and interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
			bnx2_enable_int(bp);
		}
	}
}
450
451 static void
452 bnx2_free_mem(struct bnx2 *bp)
453 {
454         int i;
455
456         for (i = 0; i < bp->ctx_pages; i++) {
457                 if (bp->ctx_blk[i]) {
458                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459                                             bp->ctx_blk[i],
460                                             bp->ctx_blk_mapping[i]);
461                         bp->ctx_blk[i] = NULL;
462                 }
463         }
464         if (bp->status_blk) {
465                 pci_free_consistent(bp->pdev, bp->status_stats_size,
466                                     bp->status_blk, bp->status_blk_mapping);
467                 bp->status_blk = NULL;
468                 bp->stats_blk = NULL;
469         }
470         if (bp->tx_desc_ring) {
471                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
472                                     bp->tx_desc_ring, bp->tx_desc_mapping);
473                 bp->tx_desc_ring = NULL;
474         }
475         kfree(bp->tx_buf_ring);
476         bp->tx_buf_ring = NULL;
477         for (i = 0; i < bp->rx_max_ring; i++) {
478                 if (bp->rx_desc_ring[i])
479                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
480                                             bp->rx_desc_ring[i],
481                                             bp->rx_desc_mapping[i]);
482                 bp->rx_desc_ring[i] = NULL;
483         }
484         vfree(bp->rx_buf_ring);
485         bp->rx_buf_ring = NULL;
486 }
487
488 static int
489 bnx2_alloc_mem(struct bnx2 *bp)
490 {
491         int i, status_blk_size;
492
493         bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
494         if (bp->tx_buf_ring == NULL)
495                 return -ENOMEM;
496
497         bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
498                                                 &bp->tx_desc_mapping);
499         if (bp->tx_desc_ring == NULL)
500                 goto alloc_mem_err;
501
502         bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
503         if (bp->rx_buf_ring == NULL)
504                 goto alloc_mem_err;
505
506         memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
507
508         for (i = 0; i < bp->rx_max_ring; i++) {
509                 bp->rx_desc_ring[i] =
510                         pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
511                                              &bp->rx_desc_mapping[i]);
512                 if (bp->rx_desc_ring[i] == NULL)
513                         goto alloc_mem_err;
514
515         }
516
517         /* Combine status and statistics blocks into one allocation. */
518         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
519         bp->status_stats_size = status_blk_size +
520                                 sizeof(struct statistics_block);
521
522         bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
523                                               &bp->status_blk_mapping);
524         if (bp->status_blk == NULL)
525                 goto alloc_mem_err;
526
527         memset(bp->status_blk, 0, bp->status_stats_size);
528
529         bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
530                                   status_blk_size);
531
532         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
533
534         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
535                 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
536                 if (bp->ctx_pages == 0)
537                         bp->ctx_pages = 1;
538                 for (i = 0; i < bp->ctx_pages; i++) {
539                         bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
540                                                 BCM_PAGE_SIZE,
541                                                 &bp->ctx_blk_mapping[i]);
542                         if (bp->ctx_blk[i] == NULL)
543                                 goto alloc_mem_err;
544                 }
545         }
546         return 0;
547
548 alloc_mem_err:
549         bnx2_free_mem(bp);
550         return -ENOMEM;
551 }
552
553 static void
554 bnx2_report_fw_link(struct bnx2 *bp)
555 {
556         u32 fw_link_status = 0;
557
558         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
559                 return;
560
561         if (bp->link_up) {
562                 u32 bmsr;
563
564                 switch (bp->line_speed) {
565                 case SPEED_10:
566                         if (bp->duplex == DUPLEX_HALF)
567                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
568                         else
569                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
570                         break;
571                 case SPEED_100:
572                         if (bp->duplex == DUPLEX_HALF)
573                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
574                         else
575                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
576                         break;
577                 case SPEED_1000:
578                         if (bp->duplex == DUPLEX_HALF)
579                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
580                         else
581                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
582                         break;
583                 case SPEED_2500:
584                         if (bp->duplex == DUPLEX_HALF)
585                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
586                         else
587                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
588                         break;
589                 }
590
591                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
592
593                 if (bp->autoneg) {
594                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
595
596                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
597                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
598
599                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
600                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
601                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
602                         else
603                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
604                 }
605         }
606         else
607                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
608
609         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
610 }
611
612 static char *
613 bnx2_xceiver_str(struct bnx2 *bp)
614 {
615         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
616                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
617                  "Copper"));
618 }
619
620 static void
621 bnx2_report_link(struct bnx2 *bp)
622 {
623         if (bp->link_up) {
624                 netif_carrier_on(bp->dev);
625                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
626                        bnx2_xceiver_str(bp));
627
628                 printk("%d Mbps ", bp->line_speed);
629
630                 if (bp->duplex == DUPLEX_FULL)
631                         printk("full duplex");
632                 else
633                         printk("half duplex");
634
635                 if (bp->flow_ctrl) {
636                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
637                                 printk(", receive ");
638                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
639                                         printk("& transmit ");
640                         }
641                         else {
642                                 printk(", transmit ");
643                         }
644                         printk("flow control ON");
645                 }
646                 printk("\n");
647         }
648         else {
649                 netif_carrier_off(bp->dev);
650                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
651                        bnx2_xceiver_str(bp));
652         }
653
654         bnx2_report_fw_link(bp);
655 }
656
/* Resolve the pause (flow control) configuration into bp->flow_ctrl.
 * If flow-control autonegotiation is not fully enabled, the requested
 * setting is applied directly (full duplex only).  Otherwise the
 * advertised abilities of both link partners are combined per
 * IEEE 802.3 Table 28B-3.  Half duplex never uses pause frames.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes block reports the resolved pause state
	 * directly in its STAT1 register; no advert comparison needed.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes uses 1000X pause bits; translate both sides to the
	 * copper ADVERTISE_PAUSE_* encoding so the resolution logic
	 * below can be shared.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* We can receive even though the partner
				 * only wants to send pause frames.
				 */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
732
733 static int
734 bnx2_5709s_linkup(struct bnx2 *bp)
735 {
736         u32 val, speed;
737
738         bp->link_up = 1;
739
740         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
741         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
742         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
743
744         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
745                 bp->line_speed = bp->req_line_speed;
746                 bp->duplex = bp->req_duplex;
747                 return 0;
748         }
749         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
750         switch (speed) {
751                 case MII_BNX2_GP_TOP_AN_SPEED_10:
752                         bp->line_speed = SPEED_10;
753                         break;
754                 case MII_BNX2_GP_TOP_AN_SPEED_100:
755                         bp->line_speed = SPEED_100;
756                         break;
757                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
758                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
759                         bp->line_speed = SPEED_1000;
760                         break;
761                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
762                         bp->line_speed = SPEED_2500;
763                         break;
764         }
765         if (val & MII_BNX2_GP_TOP_AN_FD)
766                 bp->duplex = DUPLEX_FULL;
767         else
768                 bp->duplex = DUPLEX_HALF;
769         return 0;
770 }
771
772 static int
773 bnx2_5708s_linkup(struct bnx2 *bp)
774 {
775         u32 val;
776
777         bp->link_up = 1;
778         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
779         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
780                 case BCM5708S_1000X_STAT1_SPEED_10:
781                         bp->line_speed = SPEED_10;
782                         break;
783                 case BCM5708S_1000X_STAT1_SPEED_100:
784                         bp->line_speed = SPEED_100;
785                         break;
786                 case BCM5708S_1000X_STAT1_SPEED_1G:
787                         bp->line_speed = SPEED_1000;
788                         break;
789                 case BCM5708S_1000X_STAT1_SPEED_2G5:
790                         bp->line_speed = SPEED_2500;
791                         break;
792         }
793         if (val & BCM5708S_1000X_STAT1_FD)
794                 bp->duplex = DUPLEX_FULL;
795         else
796                 bp->duplex = DUPLEX_HALF;
797
798         return 0;
799 }
800
801 static int
802 bnx2_5706s_linkup(struct bnx2 *bp)
803 {
804         u32 bmcr, local_adv, remote_adv, common;
805
806         bp->link_up = 1;
807         bp->line_speed = SPEED_1000;
808
809         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
810         if (bmcr & BMCR_FULLDPLX) {
811                 bp->duplex = DUPLEX_FULL;
812         }
813         else {
814                 bp->duplex = DUPLEX_HALF;
815         }
816
817         if (!(bmcr & BMCR_ANENABLE)) {
818                 return 0;
819         }
820
821         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
822         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
823
824         common = local_adv & remote_adv;
825         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
826
827                 if (common & ADVERTISE_1000XFULL) {
828                         bp->duplex = DUPLEX_FULL;
829                 }
830                 else {
831                         bp->duplex = DUPLEX_HALF;
832                 }
833         }
834
835         return 0;
836 }
837
838 static int
839 bnx2_copper_linkup(struct bnx2 *bp)
840 {
841         u32 bmcr;
842
843         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
844         if (bmcr & BMCR_ANENABLE) {
845                 u32 local_adv, remote_adv, common;
846
847                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
848                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
849
850                 common = local_adv & (remote_adv >> 2);
851                 if (common & ADVERTISE_1000FULL) {
852                         bp->line_speed = SPEED_1000;
853                         bp->duplex = DUPLEX_FULL;
854                 }
855                 else if (common & ADVERTISE_1000HALF) {
856                         bp->line_speed = SPEED_1000;
857                         bp->duplex = DUPLEX_HALF;
858                 }
859                 else {
860                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
861                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
862
863                         common = local_adv & remote_adv;
864                         if (common & ADVERTISE_100FULL) {
865                                 bp->line_speed = SPEED_100;
866                                 bp->duplex = DUPLEX_FULL;
867                         }
868                         else if (common & ADVERTISE_100HALF) {
869                                 bp->line_speed = SPEED_100;
870                                 bp->duplex = DUPLEX_HALF;
871                         }
872                         else if (common & ADVERTISE_10FULL) {
873                                 bp->line_speed = SPEED_10;
874                                 bp->duplex = DUPLEX_FULL;
875                         }
876                         else if (common & ADVERTISE_10HALF) {
877                                 bp->line_speed = SPEED_10;
878                                 bp->duplex = DUPLEX_HALF;
879                         }
880                         else {
881                                 bp->line_speed = 0;
882                                 bp->link_up = 0;
883                         }
884                 }
885         }
886         else {
887                 if (bmcr & BMCR_SPEED100) {
888                         bp->line_speed = SPEED_100;
889                 }
890                 else {
891                         bp->line_speed = SPEED_10;
892                 }
893                 if (bmcr & BMCR_FULLDPLX) {
894                         bp->duplex = DUPLEX_FULL;
895                 }
896                 else {
897                         bp->duplex = DUPLEX_HALF;
898                 }
899         }
900
901         return 0;
902 }
903
/* Program the MAC (EMAC) registers to match the currently resolved link
 * state in *bp: inter-frame timing, port mode, duplex, and rx/tx pause.
 * Finally acknowledges the EMAC link-change interrupt.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                /* NOTE(review): 0x26ff presumably adjusts slot time for
                 * 1G half duplex — confirm against chip documentation.
                 */
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 lacks the MII_10M port mode, so it
                                 * uses plain MII for 10 Mbps as well.
                                 */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                /* 2.5G is GMII port mode plus the 25G bit. */
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* Link down: leave the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
970
971 static void
972 bnx2_enable_bmsr1(struct bnx2 *bp)
973 {
974         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
975             (CHIP_NUM(bp) == CHIP_NUM_5709))
976                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
977                                MII_BNX2_BLK_ADDR_GP_STATUS);
978 }
979
980 static void
981 bnx2_disable_bmsr1(struct bnx2 *bp)
982 {
983         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
984             (CHIP_NUM(bp) == CHIP_NUM_5709))
985                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
986                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
987 }
988
989 static int
990 bnx2_test_and_enable_2g5(struct bnx2 *bp)
991 {
992         u32 up1;
993         int ret = 1;
994
995         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
996                 return 0;
997
998         if (bp->autoneg & AUTONEG_SPEED)
999                 bp->advertising |= ADVERTISED_2500baseX_Full;
1000
1001         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1002                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1003
1004         bnx2_read_phy(bp, bp->mii_up1, &up1);
1005         if (!(up1 & BCM5708S_UP1_2G5)) {
1006                 up1 |= BCM5708S_UP1_2G5;
1007                 bnx2_write_phy(bp, bp->mii_up1, up1);
1008                 ret = 0;
1009         }
1010
1011         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1012                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1013                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1014
1015         return ret;
1016 }
1017
1018 static int
1019 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1020 {
1021         u32 up1;
1022         int ret = 0;
1023
1024         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025                 return 0;
1026
1027         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1028                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1029
1030         bnx2_read_phy(bp, bp->mii_up1, &up1);
1031         if (up1 & BCM5708S_UP1_2G5) {
1032                 up1 &= ~BCM5708S_UP1_2G5;
1033                 bnx2_write_phy(bp, bp->mii_up1, up1);
1034                 ret = 1;
1035         }
1036
1037         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1038                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1039                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1040
1041         return ret;
1042 }
1043
1044 static void
1045 bnx2_enable_forced_2g5(struct bnx2 *bp)
1046 {
1047         u32 bmcr;
1048
1049         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1050                 return;
1051
1052         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1053                 u32 val;
1054
1055                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1057                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1058                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1059                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1060                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1061
1062                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1063                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1064                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1065
1066         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1067                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1068                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1069         }
1070
1071         if (bp->autoneg & AUTONEG_SPEED) {
1072                 bmcr &= ~BMCR_ANENABLE;
1073                 if (bp->req_duplex == DUPLEX_FULL)
1074                         bmcr |= BMCR_FULLDPLX;
1075         }
1076         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1077 }
1078
1079 static void
1080 bnx2_disable_forced_2g5(struct bnx2 *bp)
1081 {
1082         u32 bmcr;
1083
1084         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1085                 return;
1086
1087         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1088                 u32 val;
1089
1090                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1092                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1093                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1094                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1095
1096                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1097                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1099
1100         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1101                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1102                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1103         }
1104
1105         if (bp->autoneg & AUTONEG_SPEED)
1106                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1107         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1108 }
1109
/* Poll the PHY for link status, resolve speed/duplex/flow-control on a
 * link-up transition, report state changes, and reprogram the MAC.
 * Caller context: NOTE(review) — appears to expect phy_lock held, as the
 * other PHY accessors here do; confirm against callers outside this view.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback modes the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* Remote-PHY devices get link state via firmware events instead. */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return 0;

        link_up = bp->link_up;

        /* Read BMSR twice: link-down is latched, so the first read clears
         * a stale latch and the second returns current status.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        /* On 5706 SerDes, trust the EMAC link status over the PHY's. */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve speed/duplex with the chip-specific routine. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link down: drop any forced 2.5G so autoneg can recover. */
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        /* Only log/report when the link state actually changed. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1175
1176 static int
1177 bnx2_reset_phy(struct bnx2 *bp)
1178 {
1179         int i;
1180         u32 reg;
1181
1182         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1183
1184 #define PHY_RESET_MAX_WAIT 100
1185         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1186                 udelay(10);
1187
1188                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1189                 if (!(reg & BMCR_RESET)) {
1190                         udelay(20);
1191                         break;
1192                 }
1193         }
1194         if (i == PHY_RESET_MAX_WAIT) {
1195                 return -EBUSY;
1196         }
1197         return 0;
1198 }
1199
1200 static u32
1201 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1202 {
1203         u32 adv = 0;
1204
1205         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1206                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1207
1208                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1209                         adv = ADVERTISE_1000XPAUSE;
1210                 }
1211                 else {
1212                         adv = ADVERTISE_PAUSE_CAP;
1213                 }
1214         }
1215         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1216                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217                         adv = ADVERTISE_1000XPSE_ASYM;
1218                 }
1219                 else {
1220                         adv = ADVERTISE_PAUSE_ASYM;
1221                 }
1222         }
1223         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1224                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1226                 }
1227                 else {
1228                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229                 }
1230         }
1231         return adv;
1232 }
1233
1234 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1235
1236 static int
1237 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1238 {
1239         u32 speed_arg = 0, pause_adv;
1240
1241         pause_adv = bnx2_phy_get_pause_adv(bp);
1242
1243         if (bp->autoneg & AUTONEG_SPEED) {
1244                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1245                 if (bp->advertising & ADVERTISED_10baseT_Half)
1246                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1247                 if (bp->advertising & ADVERTISED_10baseT_Full)
1248                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1249                 if (bp->advertising & ADVERTISED_100baseT_Half)
1250                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1251                 if (bp->advertising & ADVERTISED_100baseT_Full)
1252                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1253                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1254                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1255                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1256                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1257         } else {
1258                 if (bp->req_line_speed == SPEED_2500)
1259                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1260                 else if (bp->req_line_speed == SPEED_1000)
1261                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1262                 else if (bp->req_line_speed == SPEED_100) {
1263                         if (bp->req_duplex == DUPLEX_FULL)
1264                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1265                         else
1266                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1267                 } else if (bp->req_line_speed == SPEED_10) {
1268                         if (bp->req_duplex == DUPLEX_FULL)
1269                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1270                         else
1271                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1272                 }
1273         }
1274
1275         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1276                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1277         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1278                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1279
1280         if (port == PORT_TP)
1281                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1282                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1283
1284         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1285
1286         spin_unlock_bh(&bp->phy_lock);
1287         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1288         spin_lock_bh(&bp->phy_lock);
1289
1290         return 0;
1291 }
1292
/* Configure a SerDes PHY according to the requested settings in *bp:
 * either force a fixed speed/duplex, or (re)start autonegotiation with
 * the configured advertisement.  Called with phy_lock held; the lock is
 * dropped briefly around the msleep() below.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
        u32 adv, bmcr;
        u32 new_adv = 0;

        /* Firmware-managed PHYs are configured through the firmware. */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return (bnx2_setup_remote_phy(bp, port));

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                /* Forced speed path. */
                u32 new_bmcr;
                int force_link_down = 0;

                /* If the 2.5G enable state has to change, the link must
                 * be bounced so the partner sees the new configuration.
                 */
                if (bp->req_line_speed == SPEED_2500) {
                        if (!bnx2_test_and_enable_2g5(bp))
                                force_link_down = 1;
                } else if (bp->req_line_speed == SPEED_1000) {
                        if (bnx2_test_and_disable_2g5(bp))
                                force_link_down = 1;
                }
                bnx2_read_phy(bp, bp->mii_adv, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                new_bmcr = bmcr & ~BMCR_ANENABLE;
                new_bmcr |= BMCR_SPEED1000;

                if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                        if (bp->req_line_speed == SPEED_2500)
                                bnx2_enable_forced_2g5(bp);
                        else if (bp->req_line_speed == SPEED_1000) {
                                bnx2_disable_forced_2g5(bp);
                                /* 0x2000 is the standard BMCR 100 Mbps
                                 * speed-select bit (BMCR_SPEED100).
                                 */
                                new_bmcr &= ~0x2000;
                        }

                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        if (bp->req_line_speed == SPEED_2500)
                                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        else
                                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, bp->mii_adv, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, bp->mii_adv, adv);
                        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                } else {
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Autoneg path: build the advertisement and restart autoneg if
         * anything changed (or autoneg was previously disabled).
         */
        bnx2_test_and_enable_2g5(bp);

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, bp->mii_adv, &adv);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

        bp->serdes_an_pending = 0;
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, bp->mii_adv, new_adv);
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        } else {
                bnx2_resolve_flow_ctrl(bp);
                bnx2_set_mac_link(bp);
        }

        return 0;
}
1407
/* Ethtool advertisement mask of all fibre (SerDes) speeds; includes 2.5G
 * only on 2.5G-capable PHYs.  Note: expands using a local `bp` variable.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask of all copper speeds. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement bits for all 10/100 modes plus the CSMA selector. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII_CTRL1000 advertisement bits for both 1000 Mbps duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1422
/* Initialize the default link configuration from the firmware's stored
 * link word in shared memory (remote-PHY devices only).  The word is
 * decoded into autoneg/advertising or a forced speed/duplex.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
        u32 link;

        /* Copper and SerDes defaults live at different shmem offsets. */
        if (bp->phy_port == PORT_TP)
                link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
        else
                link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

        if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
                /* Autoneg default: build the advertisement mask. */
                bp->req_line_speed = 0;
                bp->autoneg |= AUTONEG_SPEED;
                bp->advertising = ADVERTISED_Autoneg;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                        bp->advertising |= ADVERTISED_10baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
                        bp->advertising |= ADVERTISED_10baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                        bp->advertising |= ADVERTISED_100baseT_Half;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
                        bp->advertising |= ADVERTISED_100baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->advertising |= ADVERTISED_1000baseT_Full;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->advertising |= ADVERTISED_2500baseX_Full;
        } else {
                /* Forced default: highest speed bit set in the word wins
                 * (later checks overwrite req_line_speed).
                 */
                bp->autoneg = 0;
                bp->advertising = 0;
                bp->req_duplex = DUPLEX_FULL;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
                        bp->req_line_speed = SPEED_10;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
                        bp->req_line_speed = SPEED_100;
                        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                                bp->req_duplex = DUPLEX_HALF;
                }
                if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
                        bp->req_line_speed = SPEED_1000;
                if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
                        bp->req_line_speed = SPEED_2500;
        }
}
1469
1470 static void
1471 bnx2_set_default_link(struct bnx2 *bp)
1472 {
1473         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1474                 return bnx2_set_default_remote_link(bp);
1475
1476         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1477         bp->req_line_speed = 0;
1478         if (bp->phy_flags & PHY_SERDES_FLAG) {
1479                 u32 reg;
1480
1481                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1482
1483                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1484                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1485                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1486                         bp->autoneg = 0;
1487                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1488                         bp->req_duplex = DUPLEX_FULL;
1489                 }
1490         } else
1491                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1492 }
1493
/* Write the next driver pulse sequence number to the firmware's pulse
 * mailbox in shared memory, telling firmware the driver is alive.  The
 * register-window write pair is open-coded under indirect_lock so the
 * address/data accesses stay atomic with respect to other indirect
 * register users.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
        u32 msg;
        u32 addr;

        spin_lock(&bp->indirect_lock);
        msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
        addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
        spin_unlock(&bp->indirect_lock);
}
1507
/* Handle a link-status event from a firmware-managed (remote) PHY:
 * decode the status word from shared memory into link/speed/duplex/
 * flow-control state, then report changes and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
        u32 msg;
        u8 link_up = bp->link_up;
        u8 old_port;

        msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

        /* Firmware sets this bit when it wants a fresh heartbeat. */
        if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
                bnx2_send_heart_beat(bp);

        msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

        if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
                bp->link_up = 0;
        else {
                u32 speed;

                bp->link_up = 1;
                speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
                bp->duplex = DUPLEX_FULL;
                /* Each HALF case deliberately falls through to its FULL
                 * sibling: the HALF case only overrides the duplex, and
                 * the shared line-speed assignment follows.
                 */
                switch (speed) {
                        case BNX2_LINK_STATUS_10HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_10FULL:
                                bp->line_speed = SPEED_10;
                                break;
                        case BNX2_LINK_STATUS_100HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_100BASE_T4:
                        case BNX2_LINK_STATUS_100FULL:
                                bp->line_speed = SPEED_100;
                                break;
                        case BNX2_LINK_STATUS_1000HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_1000FULL:
                                bp->line_speed = SPEED_1000;
                                break;
                        case BNX2_LINK_STATUS_2500HALF:
                                bp->duplex = DUPLEX_HALF;
                                /* fall through */
                        case BNX2_LINK_STATUS_2500FULL:
                                bp->line_speed = SPEED_2500;
                                break;
                        default:
                                bp->line_speed = 0;
                                break;
                }

                spin_lock(&bp->phy_lock);
                bp->flow_ctrl = 0;
                /* Unless both speed and flow-control autoneg are on, use
                 * the requested flow control (full duplex only);
                 * otherwise take the negotiated result from firmware.
                 */
                if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
                    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
                        if (bp->duplex == DUPLEX_FULL)
                                bp->flow_ctrl = bp->req_flow_ctrl;
                } else {
                        if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_TX;
                        if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                                bp->flow_ctrl |= FLOW_CTRL_RX;
                }

                /* Re-derive defaults if the media type switched. */
                old_port = bp->phy_port;
                if (msg & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                if (old_port != bp->phy_port)
                        bnx2_set_default_link(bp);

                spin_unlock(&bp->phy_lock);
        }
        if (bp->link_up != link_up)
                bnx2_report_link(bp);

        bnx2_set_mac_link(bp);
}
1586
1587 static int
1588 bnx2_set_remote_link(struct bnx2 *bp)
1589 {
1590         u32 evt_code;
1591
1592         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1593         switch (evt_code) {
1594                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1595                         bnx2_remote_phy_event(bp);
1596                         break;
1597                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1598                 default:
1599                         bnx2_send_heart_beat(bp);
1600                         break;
1601         }
1602         return 0;
1603 }
1604
/* Configure the copper PHY from the current settings in *bp.
 *
 * With autonegotiation enabled, the advertisement registers are only
 * rewritten (and autoneg restarted) when something actually changed;
 * otherwise speed/duplex are forced through BMCR.  Caller holds
 * bp->phy_lock; it is dropped briefly around the forced-link-down
 * sleep below.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Read back what is currently advertised (10/100 speeds
		 * plus pause bits) so we can tell whether anything changes.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the requested bp->advertising mask into MII
		 * advertisement register bits.
		 */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite the advertisement and restart autoneg only when
		 * it changed or autoneg was not already enabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1701
1702 static int
1703 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1704 {
1705         if (bp->loopback == MAC_LOOPBACK)
1706                 return 0;
1707
1708         if (bp->phy_flags & PHY_SERDES_FLAG) {
1709                 return (bnx2_setup_serdes_phy(bp, port));
1710         }
1711         else {
1712                 return (bnx2_setup_copper_phy(bp));
1713         }
1714 }
1715
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S exposes banked (block-addressed) registers; the standard
 * IEEE MII registers live at an offset of 0x10, so the bp->mii_*
 * shortcuts are redirected there first.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode rather than auto-detecting the media. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the hardware supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* Enable clause 73 BAM negotiation features. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected as the default bank. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1764
/* Initialize the 5708 SerDes PHY: select fiber mode with media
 * auto-detection, enable PLL detection, optionally advertise 2.5G,
 * and apply chip-revision and backplane-specific TX tweaks read from
 * shared memory.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE register conventions in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with automatic media detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the hardware supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a stronger TX driver. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* A non-zero TXCTL3 value in the shared-memory port config is
	 * applied on backplane boards only.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1822
/* Initialize the 5706 SerDes PHY and size its packet handling for the
 * current MTU.  Registers 0x18/0x1c look like Broadcom auxiliary
 * control / shadow registers — TODO confirm against the PHY data
 * sheet.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1859
/* Initialize the copper PHY: apply the CRC and early-DAC workarounds
 * when the corresponding phy_flags are set, match the extended packet
 * length setting to the MTU, and enable ethernet@wirespeed (automatic
 * speed downgrade on marginal cable).  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround: a fixed write sequence to the expansion
	 * registers — presumably a vendor-supplied erratum fix; the
	 * exact meaning of these values is not documented here.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expand register 8 to disable early DAC. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1910
1911
/* Reset and initialize the PHY, then program it with the current link
 * settings.
 *
 * The bp->mii_* shortcuts default to the standard IEEE offsets (the
 * chip-specific init routine may redirect them).  When the PHY is
 * owned by remote firmware (REMOTE_PHY_CAP_FLAG) the local PHY is not
 * touched at all.  Returns 0 or an error from the init/setup routine.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	/* Interrupt on link attention only. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Read and cache the 32-bit PHY ID. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	/* Dispatch to the chip-specific SerDes init, or copper init. */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1955
1956 static int
1957 bnx2_set_mac_loopback(struct bnx2 *bp)
1958 {
1959         u32 mac_mode;
1960
1961         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1962         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1963         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1964         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1965         bp->link_up = 1;
1966         return 0;
1967 }
1968
1969 static int bnx2_test_link(struct bnx2 *);
1970
/* Put the PHY into loopback at 1000/full, wait up to ~1 second for
 * the link to report up, then configure the MAC for GMII with
 * loopback/force bits cleared.  Returns 0 on success or the error
 * from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link; give up silently after 10 x 100ms. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2000
/* Post a message to the bootcode driver mailbox and wait for the
 * firmware to acknowledge it.
 *
 * A rolling sequence number is OR'ed into the message so the ack can
 * be matched.  Messages tagged BNX2_DRV_MSG_DATA_WAIT0 return 0 after
 * the wait regardless of the outcome.  On timeout the firmware is
 * notified with BNX2_DRV_MSG_CODE_FW_TIMEOUT and -EBUSY is returned;
 * a non-OK firmware status yields -EIO.  @silent suppresses the
 * timeout printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* The ack field echoes our sequence number when the
		 * firmware has consumed the message.
		 */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2043
/* Set up the 5709's host-memory-backed context.
 *
 * Starts the on-chip context memory initialization, then programs the
 * host page table with the DMA address of each context page that was
 * allocated into bp->ctx_blk_mapping[].  Returns 0, or -EBUSY when
 * the hardware fails to complete an operation within the poll budget.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encode the page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low/high halves of the 64-bit DMA address, then kick
		 * off the page table write and poll for completion.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2086
/* Zero all 96 on-chip context entries (chips other than the 5709,
 * which uses host memory for context instead).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		/* On 5706 A0, some virtual CIDs map to different
		 * physical CIDs — presumably a chip erratum workaround;
		 * confirm against the chip errata documentation.
		 */
		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans several physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2132
2133 static int
2134 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2135 {
2136         u16 *good_mbuf;
2137         u32 good_mbuf_cnt;
2138         u32 val;
2139
2140         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2141         if (good_mbuf == NULL) {
2142                 printk(KERN_ERR PFX "Failed to allocate memory in "
2143                                     "bnx2_alloc_bad_rbuf\n");
2144                 return -ENOMEM;
2145         }
2146
2147         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2148                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2149
2150         good_mbuf_cnt = 0;
2151
2152         /* Allocate a bunch of mbufs and save the good ones in an array. */
2153         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2154         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2155                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2156
2157                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2158
2159                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2160
2161                 /* The addresses with Bit 9 set are bad memory blocks. */
2162                 if (!(val & (1 << 9))) {
2163                         good_mbuf[good_mbuf_cnt] = (u16) val;
2164                         good_mbuf_cnt++;
2165                 }
2166
2167                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2168         }
2169
2170         /* Free the good ones back to the mbuf pool thus discarding
2171          * all the bad ones. */
2172         while (good_mbuf_cnt) {
2173                 good_mbuf_cnt--;
2174
2175                 val = good_mbuf[good_mbuf_cnt];
2176                 val = (val << 9) | val | 1;
2177
2178                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2179         }
2180         kfree(good_mbuf);
2181         return 0;
2182 }
2183
2184 static void
2185 bnx2_set_mac_addr(struct bnx2 *bp)
2186 {
2187         u32 val;
2188         u8 *mac_addr = bp->dev->dev_addr;
2189
2190         val = (mac_addr[0] << 8) | mac_addr[1];
2191
2192         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2193
2194         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2195                 (mac_addr[4] << 8) | mac_addr[5];
2196
2197         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2198 }
2199
/* Allocate and DMA-map a fresh skb for RX ring slot @index, point the
 * corresponding rx_bd at it, and advance rx_prod_bseq.  Returns 0 on
 * success or -ENOMEM when the skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to BNX2_RX_ALIGN for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	/* NOTE(review): the mapping is not checked with
	 * pci_dma_mapping_error — confirm whether that is acceptable
	 * for the platforms this driver targets.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the BD halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2230
2231 static int
2232 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2233 {
2234         struct status_block *sblk = bp->status_blk;
2235         u32 new_link_state, old_link_state;
2236         int is_set = 1;
2237
2238         new_link_state = sblk->status_attn_bits & event;
2239         old_link_state = sblk->status_attn_bits_ack & event;
2240         if (new_link_state != old_link_state) {
2241                 if (new_link_state)
2242                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2243                 else
2244                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2245         } else
2246                 is_set = 0;
2247
2248         return is_set;
2249 }
2250
/* Service PHY-related attention events: a link-state change triggers
 * link resolution under phy_lock; a timer-abort event is handed to
 * the remote-PHY firmware event handler.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2263
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the software consumer up to the hardware
 * consumer reported in the status block, unmapping and freeing each
 * completed skb, then wakes the queue if it was stopped and enough
 * descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last BD of a ring page is a link entry; skip over it. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the link entry if the packet wraps
			 * a ring page boundary.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Defer if the hardware has not consumed the
			 * packet's last BD yet (signed wrap-safe compare).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware consumer — more packets may have
		 * completed while we were freeing this one.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-check under netif_tx_lock to close the race with
	 * bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2351
/* Recycle an RX skb that is not being passed up the stack: move it
 * (together with its DMA mapping and BD address) from ring slot @cons
 * to producer slot @prod, and resync the header area the CPU may have
 * touched back to the device.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Only the header area was synced for the CPU; hand it back. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2381
/* Finish receiving @skb (@len payload bytes at @dma_addr).
 *
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16.  The producer slot is replenished
 * first; only if that succeeds is the skb unmapped and trimmed past
 * the rx_offset header.  On allocation failure the skb is recycled
 * back into the ring and the error returned, so no slot is left
 * empty.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		return err;
	}

	/* Skip the l2_fhdr area in front of the packet data. */
	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return 0;
}
2402
2403 static inline u16
2404 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2405 {
2406         u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2407
2408         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2409                 cons++;
2410         return cons;
2411 }
2412
2413 static int
2414 bnx2_rx_int(struct bnx2 *bp, int budget)
2415 {
2416         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2417         struct l2_fhdr *rx_hdr;
2418         int rx_pkt = 0;
2419
2420         hw_cons = bnx2_get_hw_rx_cons(bp);
2421         sw_cons = bp->rx_cons;
2422         sw_prod = bp->rx_prod;
2423
2424         /* Memory barrier necessary as speculative reads of the rx
2425          * buffer can be ahead of the index in the status block
2426          */
2427         rmb();
2428         while (sw_cons != hw_cons) {
2429                 unsigned int len;
2430                 u32 status;
2431                 struct sw_bd *rx_buf;
2432                 struct sk_buff *skb;
2433                 dma_addr_t dma_addr;
2434
2435                 sw_ring_cons = RX_RING_IDX(sw_cons);
2436                 sw_ring_prod = RX_RING_IDX(sw_prod);
2437
2438                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2439                 skb = rx_buf->skb;
2440
2441                 rx_buf->skb = NULL;
2442
2443                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2444
2445                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2446                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2447
2448                 rx_hdr = (struct l2_fhdr *) skb->data;
2449                 len = rx_hdr->l2_fhdr_pkt_len - 4;
2450
2451                 if ((status = rx_hdr->l2_fhdr_status) &
2452                         (L2_FHDR_ERRORS_BAD_CRC |
2453                         L2_FHDR_ERRORS_PHY_DECODE |
2454                         L2_FHDR_ERRORS_ALIGNMENT |
2455                         L2_FHDR_ERRORS_TOO_SHORT |
2456                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2457
2458                         bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2459                         goto next_rx;
2460                 }
2461
2462                 /* Since we don't have a jumbo ring, copy small packets
2463                  * if mtu > 1500
2464                  */
2465                 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2466                         struct sk_buff *new_skb;
2467
2468                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2469                         if (new_skb == NULL) {
2470                                 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2471                                                   sw_ring_prod);
2472                                 goto next_rx;
2473                         }
2474
2475                         /* aligned copy */
2476                         skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2477                                       new_skb->data, len + 2);
2478                         skb_reserve(new_skb, 2);
2479                         skb_put(new_skb, len);
2480
2481                         bnx2_reuse_rx_skb(bp, skb,
2482                                 sw_ring_cons, sw_ring_prod);
2483
2484                         skb = new_skb;
2485                 } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
2486                                     (sw_ring_cons << 16) | sw_ring_prod)))
2487                         goto next_rx;
2488
2489                 skb->protocol = eth_type_trans(skb, bp->dev);
2490
2491                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2492                         (ntohs(skb->protocol) != 0x8100)) {
2493
2494                         dev_kfree_skb(skb);
2495                         goto next_rx;
2496
2497                 }
2498
2499                 skb->ip_summed = CHECKSUM_NONE;
2500                 if (bp->rx_csum &&
2501                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2502                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2503
2504                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2505                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2506                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2507                 }
2508
2509 #ifdef BCM_VLAN
2510                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2511                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2512                                 rx_hdr->l2_fhdr_vlan_tag);
2513                 }
2514                 else
2515 #endif
2516                         netif_receive_skb(skb);
2517
2518                 bp->dev->last_rx = jiffies;
2519                 rx_pkt++;
2520
2521 next_rx:
2522                 sw_cons = NEXT_RX_BD(sw_cons);
2523                 sw_prod = NEXT_RX_BD(sw_prod);
2524
2525                 if ((rx_pkt == budget))
2526                         break;
2527
2528                 /* Refresh hw_cons to see if there is new work */
2529                 if (sw_cons == hw_cons) {
2530                         hw_cons = bnx2_get_hw_rx_cons(bp);
2531                         rmb();
2532                 }
2533         }
2534         bp->rx_cons = sw_cons;
2535         bp->rx_prod = sw_prod;
2536
2537         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2538
2539         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2540
2541         mmiowb();
2542
2543         return rx_pkt;
2544
2545 }
2546
2547 /* MSI ISR - The only difference between this and the INTx ISR
2548  * is that the MSI interrupt is always serviced.
2549  */
2550 static irqreturn_t
2551 bnx2_msi(int irq, void *dev_instance)
2552 {
2553         struct net_device *dev = dev_instance;
2554         struct bnx2 *bp = netdev_priv(dev);
2555
2556         prefetch(bp->status_blk);
2557         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2558                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2559                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2560
2561         /* Return here if interrupt is disabled. */
2562         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2563                 return IRQ_HANDLED;
2564
2565         netif_rx_schedule(dev, &bp->napi);
2566
2567         return IRQ_HANDLED;
2568 }
2569
2570 static irqreturn_t
2571 bnx2_msi_1shot(int irq, void *dev_instance)
2572 {
2573         struct net_device *dev = dev_instance;
2574         struct bnx2 *bp = netdev_priv(dev);
2575
2576         prefetch(bp->status_blk);
2577
2578         /* Return here if interrupt is disabled. */
2579         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2580                 return IRQ_HANDLED;
2581
2582         netif_rx_schedule(dev, &bp->napi);
2583
2584         return IRQ_HANDLED;
2585 }
2586
/* INTx (and shared-line) interrupt handler.  Must first determine
 * whether the interrupt actually came from this device before acking
 * and scheduling NAPI, since the line can be shared.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct status_block *sblk = bp->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bp->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                /* Status unchanged and INTA still asserted: not ours. */
                return IRQ_NONE;

        /* Ack and mask further interrupts until NAPI polling completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (netif_rx_schedule_prep(dev, &bp->napi)) {
                /* Record the status index we are polling against before
                 * scheduling so bnx2_poll() can detect new events.
                 */
                bp->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bp->napi);
        }

        return IRQ_HANDLED;
}
2625
2626 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
2627                                  STATUS_ATTN_BITS_TIMER_ABORT)
2628
2629 static inline int
2630 bnx2_has_work(struct bnx2 *bp)
2631 {
2632         struct status_block *sblk = bp->status_blk;
2633
2634         if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2635             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2636                 return 1;
2637
2638         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2639             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2640                 return 1;
2641
2642         return 0;
2643 }
2644
/* One pass of NAPI work: service attention (PHY/link) events, then TX
 * and RX completions.  Returns the updated work_done count; only RX
 * packets count against the budget.
 */
static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
        struct status_block *sblk = bp->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        /* An attention event is pending when the event bits differ from
         * the acknowledged bits.
         */
        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);    /* flush the write */
        }

        if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
                bnx2_tx_int(bp);

        /* RX work is bounded by whatever budget remains. */
        if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
                work_done += bnx2_rx_int(bp, budget - work_done);

        return work_done;
}
2672
/* NAPI poll callback.  Loops servicing completions until either the
 * budget is exhausted or no work remains, then completes NAPI and
 * re-enables device interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2 *bp = container_of(napi, struct bnx2, napi);
        int work_done = 0;
        struct status_block *sblk = bp->status_blk;

        while (1) {
                work_done = bnx2_poll_work(bp, work_done, budget);

                /* Budget exhausted: stay scheduled; interrupts remain
                 * masked and we will be polled again.
                 */
                if (unlikely(work_done >= budget))
                        break;

                /* bp->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bp->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bp))) {
                        netif_rx_complete(bp->dev, napi);
                        /* MSI: a single write with the new index re-arms
                         * the interrupt.
                         */
                        if (likely(bp->flags & USING_MSI_FLAG)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bp->last_status_idx);
                                break;
                        }
                        /* INTx: write twice - first with the mask bit set
                         * to update the index, then without it to unmask.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bp->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bp->last_status_idx);
                        break;
                }
        }

        return work_done;
}
2713
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / VLAN-tag keep) and the
 * multicast hash filter, then reprograms the RPM sort-user0 register.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the promiscuous and
         * keep-VLAN-tag bits cleared; they are re-added below as needed.
         */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in the frame only when no VLAN group is
         * registered and ASF management firmware is not active.
         */
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every bit in the hash filter. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each address into the filter: the low byte of the
                 * CRC selects the register (upper 3 bits) and the bit
                 * within it (lower 5 bits).
                 */
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Only touch the RX mode register when something changed. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, program, then re-enable sorting. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
2788
2789 static void
2790 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2791         u32 rv2p_proc)
2792 {
2793         int i;
2794         u32 val;
2795
2796
2797         for (i = 0; i < rv2p_code_len; i += 8) {
2798                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2799                 rv2p_code++;
2800                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2801                 rv2p_code++;
2802
2803                 if (rv2p_proc == RV2P_PROC1) {
2804                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2805                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2806                 }
2807                 else {
2808                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2809                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2810                 }
2811         }
2812
2813         /* Reset the processor, un-stall is done later. */
2814         if (rv2p_proc == RV2P_PROC1) {
2815                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2816         }
2817         else {
2818                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2819         }
2820 }
2821
2822 static int
2823 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2824 {
2825         u32 offset;
2826         u32 val;
2827         int rc;
2828
2829         /* Halt the CPU. */
2830         val = REG_RD_IND(bp, cpu_reg->mode);
2831         val |= cpu_reg->mode_value_halt;
2832         REG_WR_IND(bp, cpu_reg->mode, val);
2833         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2834
2835         /* Load the Text area. */
2836         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2837         if (fw->gz_text) {
2838                 int j;
2839
2840                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2841                                        fw->gz_text_len);
2842                 if (rc < 0)
2843                         return rc;
2844
2845                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2846                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2847                 }
2848         }
2849
2850         /* Load the Data area. */
2851         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2852         if (fw->data) {
2853                 int j;
2854
2855                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2856                         REG_WR_IND(bp, offset, fw->data[j]);
2857                 }
2858         }
2859
2860         /* Load the SBSS area. */
2861         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2862         if (fw->sbss_len) {
2863                 int j;
2864
2865                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2866                         REG_WR_IND(bp, offset, 0);
2867                 }
2868         }
2869
2870         /* Load the BSS area. */
2871         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2872         if (fw->bss_len) {
2873                 int j;
2874
2875                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2876                         REG_WR_IND(bp, offset, 0);
2877                 }
2878         }
2879
2880         /* Load the Read-Only area. */
2881         offset = cpu_reg->spad_base +
2882                 (fw->rodata_addr - cpu_reg->mips_view_base);
2883         if (fw->rodata) {
2884                 int j;
2885
2886                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2887                         REG_WR_IND(bp, offset, fw->rodata[j]);
2888                 }
2889         }
2890
2891         /* Clear the pre-fetch instruction. */
2892         REG_WR_IND(bp, cpu_reg->inst, 0);
2893         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2894
2895         /* Start the CPU. */
2896         val = REG_RD_IND(bp, cpu_reg->mode);
2897         val &= ~cpu_reg->mode_value_halt;
2898         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2899         REG_WR_IND(bp, cpu_reg->mode, val);
2900
2901         return 0;
2902 }
2903
2904 static int
2905 bnx2_init_cpus(struct bnx2 *bp)
2906 {
2907         struct cpu_reg cpu_reg;
2908         struct fw_info *fw;
2909         int rc;
2910         void *text;
2911
2912         /* Initialize the RV2P processor. */
2913         text = vmalloc(FW_BUF_SIZE);
2914         if (!text)
2915                 return -ENOMEM;
2916         rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
2917         if (rc < 0)
2918                 goto init_cpu_err;
2919
2920         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2921
2922         rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
2923         if (rc < 0)
2924                 goto init_cpu_err;
2925
2926         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
2927
2928         /* Initialize the RX Processor. */
2929         cpu_reg.mode = BNX2_RXP_CPU_MODE;
2930         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2931         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2932         cpu_reg.state = BNX2_RXP_CPU_STATE;
2933         cpu_reg.state_value_clear = 0xffffff;
2934         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2935         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2936         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2937         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2938         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2939         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2940         cpu_reg.mips_view_base = 0x8000000;
2941
2942         if (CHIP_NUM(bp) == CHIP_NUM_5709)
2943                 fw = &bnx2_rxp_fw_09;
2944         else
2945                 fw = &bnx2_rxp_fw_06;
2946
2947         fw->text = text;
2948         rc = load_cpu_fw(bp, &cpu_reg, fw);
2949         if (rc)
2950                 goto init_cpu_err;
2951
2952         /* Initialize the TX Processor. */
2953         cpu_reg.mode = BNX2_TXP_CPU_MODE;
2954         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2955         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2956         cpu_reg.state = BNX2_TXP_CPU_STATE;
2957         cpu_reg.state_value_clear = 0xffffff;
2958         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2959         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2960         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2961         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2962         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2963         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2964         cpu_reg.mips_view_base = 0x8000000;
2965
2966         if (CHIP_NUM(bp) == CHIP_NUM_5709)
2967                 fw = &bnx2_txp_fw_09;
2968         else
2969                 fw = &bnx2_txp_fw_06;
2970
2971         fw->text = text;
2972         rc = load_cpu_fw(bp, &cpu_reg, fw);
2973         if (rc)
2974                 goto init_cpu_err;
2975
2976         /* Initialize the TX Patch-up Processor. */
2977         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2978         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2979         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2980         cpu_reg.state = BNX2_TPAT_CPU_STATE;
2981         cpu_reg.state_value_clear = 0xffffff;
2982         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2983         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2984         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2985         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2986         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2987         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2988         cpu_reg.mips_view_base = 0x8000000;
2989
2990         if (CHIP_NUM(bp) == CHIP_NUM_5709)
2991                 fw = &bnx2_tpat_fw_09;
2992         else
2993                 fw = &bnx2_tpat_fw_06;
2994
2995         fw->text = text;
2996         rc = load_cpu_fw(bp, &cpu_reg, fw);
2997         if (rc)
2998                 goto init_cpu_err;
2999
3000         /* Initialize the Completion Processor. */
3001         cpu_reg.mode = BNX2_COM_CPU_MODE;
3002         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3003         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3004         cpu_reg.state = BNX2_COM_CPU_STATE;
3005         cpu_reg.state_value_clear = 0xffffff;
3006         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3007         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3008         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3009         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3010         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3011         cpu_reg.spad_base = BNX2_COM_SCRATCH;
3012         cpu_reg.mips_view_base = 0x8000000;
3013
3014         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3015                 fw = &bnx2_com_fw_09;
3016         else
3017                 fw = &bnx2_com_fw_06;
3018
3019         fw->text = text;
3020         rc = load_cpu_fw(bp, &cpu_reg, fw);
3021         if (rc)
3022                 goto init_cpu_err;
3023
3024         /* Initialize the Command Processor. */
3025         cpu_reg.mode = BNX2_CP_CPU_MODE;
3026         cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3027         cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3028         cpu_reg.state = BNX2_CP_CPU_STATE;
3029         cpu_reg.state_value_clear = 0xffffff;
3030         cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3031         cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3032         cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3033         cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3034         cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3035         cpu_reg.spad_base = BNX2_CP_SCRATCH;
3036         cpu_reg.mips_view_base = 0x8000000;
3037
3038         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3039                 fw = &bnx2_cp_fw_09;
3040
3041                 fw->text = text;
3042                 rc = load_cpu_fw(bp, &cpu_reg, fw);
3043                 if (rc)
3044                         goto init_cpu_err;
3045         }
3046 init_cpu_err:
3047         vfree(text);
3048         return rc;
3049 }
3050
/* Transition the chip between PCI power states D0 (fully on) and
 * D3hot (suspended), programming Wake-on-LAN receive filters when
 * entering D3hot with WOL enabled.  Returns 0 on success or -EINVAL
 * for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the power-state bits and ack any pending PME. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Ack any received magic/ACPI packets and disable
                 * magic-packet detection while fully on.
                 */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Temporarily advertise only 10/100 on copper
                         * before renegotiating the link -- presumably to
                         * drop to a low speed for WOL; restore the user
                         * settings afterwards.  (NOTE(review): intent
                         * inferred, confirm against hardware docs.)
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Accept broadcast and multicast while suspended. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Notify the management firmware of the suspend. */
                if (!(bp->flags & NO_WOL_FLAG))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

                /* Set power state D3hot (3) in PMCSR -- but on 5706
                 * A0/A1 only when WOL is enabled.
                 */
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3187
3188 static int
3189 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3190 {
3191         u32 val;
3192         int j;
3193
3194         /* Request access to the flash interface. */
3195         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3196         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3197                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3198                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3199                         break;
3200
3201                 udelay(5);
3202         }
3203
3204         if (j >= NVRAM_TIMEOUT_COUNT)
3205                 return -EBUSY;
3206
3207         return 0;
3208 }
3209
3210 static int
3211 bnx2_release_nvram_lock(struct bnx2 *bp)
3212 {
3213         int j;
3214         u32 val;
3215
3216         /* Relinquish nvram interface. */
3217         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3218
3219         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3220                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3221                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3222                         break;
3223
3224                 udelay(5);
3225         }
3226
3227         if (j >= NVRAM_TIMEOUT_COUNT)
3228                 return -EBUSY;
3229
3230         return 0;
3231 }
3232
3233
3234 static int
3235 bnx2_enable_nvram_write(struct bnx2 *bp)
3236 {
3237         u32 val;
3238
3239         val = REG_RD(bp, BNX2_MISC_CFG);
3240         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3241
3242         if (bp->flash_info->flags & BNX2_NV_WREN) {
3243                 int j;
3244
3245                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3246                 REG_WR(bp, BNX2_NVM_COMMAND,
3247                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3248
3249                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3250                         udelay(5);
3251
3252                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3253                         if (val & BNX2_NVM_COMMAND_DONE)
3254                                 break;
3255                 }
3256
3257                 if (j >= NVRAM_TIMEOUT_COUNT)
3258                         return -EBUSY;
3259         }
3260         return 0;
3261 }
3262
3263 static void
3264 bnx2_disable_nvram_write(struct bnx2 *bp)
3265 {
3266         u32 val;
3267
3268         val = REG_RD(bp, BNX2_MISC_CFG);
3269         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3270 }
3271
3272
3273 static void
3274 bnx2_enable_nvram_access(struct bnx2 *bp)
3275 {
3276         u32 val;
3277
3278         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3279         /* Enable both bits, even on read. */
3280         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3281                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3282 }
3283
3284 static void
3285 bnx2_disable_nvram_access(struct bnx2 *bp)
3286 {
3287         u32 val;
3288
3289         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290         /* Disable both bits, even after read. */
3291         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3292                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3293                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3294 }
3295
3296 static int
3297 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3298 {
3299         u32 cmd;
3300         int j;
3301
3302         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3303                 /* Buffered flash, no erase needed */
3304                 return 0;
3305
3306         /* Build an erase command */
3307         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3308               BNX2_NVM_COMMAND_DOIT;
3309
3310         /* Need to clear DONE bit separately. */
3311         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3312
3313         /* Address of the NVRAM to read from. */
3314         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3315
3316         /* Issue an erase command. */
3317         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3318
3319         /* Wait for completion. */
3320         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3321                 u32 val;
3322
3323                 udelay(5);
3324
3325                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3326                 if (val & BNX2_NVM_COMMAND_DONE)
3327                         break;
3328         }
3329
3330         if (j >= NVRAM_TIMEOUT_COUNT)
3331                 return -EBUSY;
3332
3333         return 0;
3334 }
3335
3336 static int
3337 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3338 {
3339         u32 cmd;
3340         int j;
3341
3342         /* Build the command word. */
3343         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3344
3345         /* Calculate an offset of a buffered flash, not needed for 5709. */
3346         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3347                 offset = ((offset / bp->flash_info->page_size) <<
3348                            bp->flash_info->page_bits) +
3349                           (offset % bp->flash_info->page_size);
3350         }
3351
3352         /* Need to clear DONE bit separately. */
3353         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3354
3355         /* Address of the NVRAM to read from. */
3356         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3357
3358         /* Issue a read command. */
3359         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3360
3361         /* Wait for completion. */
3362         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3363                 u32 val;
3364
3365                 udelay(5);
3366
3367                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3368                 if (val & BNX2_NVM_COMMAND_DONE) {
3369                         val = REG_RD(bp, BNX2_NVM_READ);
3370
3371                         val = be32_to_cpu(val);
3372                         memcpy(ret_val, &val, 4);
3373                         break;
3374                 }
3375         }
3376         if (j >= NVRAM_TIMEOUT_COUNT)
3377                 return -EBUSY;
3378
3379         return 0;
3380 }
3381
3382
3383 static int
3384 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3385 {
3386         u32 cmd, val32;
3387         int j;
3388
3389         /* Build the command word. */
3390         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3391
3392         /* Calculate an offset of a buffered flash, not needed for 5709. */
3393         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3394                 offset = ((offset / bp->flash_info->page_size) <<
3395                           bp->flash_info->page_bits) +
3396                          (offset % bp->flash_info->page_size);
3397         }
3398
3399         /* Need to clear DONE bit separately. */
3400         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3401
3402         memcpy(&val32, val, 4);
3403         val32 = cpu_to_be32(val32);
3404
3405         /* Write the data. */
3406         REG_WR(bp, BNX2_NVM_WRITE, val32);
3407
3408         /* Address of the NVRAM to write to. */
3409         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3410
3411         /* Issue the write command. */
3412         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3413
3414         /* Wait for completion. */
3415         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3416                 udelay(5);
3417
3418                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3419                         break;
3420         }
3421         if (j >= NVRAM_TIMEOUT_COUNT)
3422                 return -EBUSY;
3423
3424         return 0;
3425 }
3426
/* Identify the attached NVRAM/flash device and record its geometry.
 *
 * On the 5709 the flash is self-describing, so the table scan is
 * skipped.  Otherwise the strapping pins reported in NVM_CFG1 are
 * matched against flash_table[]; when the interface has not yet been
 * reconfigured, the matching entry's config registers are programmed
 * under the NVRAM hardware lock.
 *
 * Returns 0 on success, -ENODEV for an unrecognized device, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match against the backup strap bits of the
			 * already-programmed config. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping layout applies. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported by firmware in shared memory; fall
	 * back to the table's total_size when firmware reports zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3509
3510 static int
3511 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3512                 int buf_size)
3513 {
3514         int rc = 0;
3515         u32 cmd_flags, offset32, len32, extra;
3516
3517         if (buf_size == 0)
3518                 return 0;
3519
3520         /* Request access to the flash interface. */
3521         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3522                 return rc;
3523
3524         /* Enable access to flash interface */
3525         bnx2_enable_nvram_access(bp);
3526
3527         len32 = buf_size;
3528         offset32 = offset;
3529         extra = 0;
3530
3531         cmd_flags = 0;
3532
3533         if (offset32 & 3) {
3534                 u8 buf[4];
3535                 u32 pre_len;
3536
3537                 offset32 &= ~3;
3538                 pre_len = 4 - (offset & 3);
3539
3540                 if (pre_len >= len32) {
3541                         pre_len = len32;
3542                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3543                                     BNX2_NVM_COMMAND_LAST;
3544                 }
3545                 else {
3546                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3547                 }
3548
3549                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3550
3551                 if (rc)
3552                         return rc;
3553
3554                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3555
3556                 offset32 += 4;
3557                 ret_buf += pre_len;
3558                 len32 -= pre_len;
3559         }
3560         if (len32 & 3) {
3561                 extra = 4 - (len32 & 3);
3562                 len32 = (len32 + 4) & ~3;
3563         }
3564
3565         if (len32 == 4) {
3566                 u8 buf[4];
3567
3568                 if (cmd_flags)
3569                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3570                 else
3571                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3572                                     BNX2_NVM_COMMAND_LAST;
3573
3574                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3575
3576                 memcpy(ret_buf, buf, 4 - extra);
3577         }
3578         else if (len32 > 0) {
3579                 u8 buf[4];
3580
3581                 /* Read the first word. */
3582                 if (cmd_flags)
3583                         cmd_flags = 0;
3584                 else
3585                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3586
3587                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3588
3589                 /* Advance to the next dword. */
3590                 offset32 += 4;
3591                 ret_buf += 4;
3592                 len32 -= 4;
3593
3594                 while (len32 > 4 && rc == 0) {
3595                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3596
3597                         /* Advance to the next dword. */
3598                         offset32 += 4;
3599                         ret_buf += 4;
3600                         len32 -= 4;
3601                 }
3602
3603                 if (rc)
3604                         return rc;
3605
3606                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3607                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3608
3609                 memcpy(ret_buf, buf, 4 - extra);
3610         }
3611
3612         /* Disable access to flash interface */
3613         bnx2_disable_nvram_access(bp);
3614
3615         bnx2_release_nvram_lock(bp);
3616
3617         return rc;
3618 }
3619
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are handled read-modify-write: the
 * surrounding dwords are read first and merged into align_buf.  For
 * non-buffered flash each affected page is read into flash_buffer,
 * erased, and rewritten in full.  The NVRAM lock is taken and
 * released once per page.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error gotos inside the per-page loop jump to
 * nvram_write_end while the NVRAM lock is still held and flash
 * access is enabled -- presumably those paths should also release
 * the lock; verify against the other bnx2_release_nvram_lock()
 * callers before changing.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range and preserve the first dword. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen the range and preserve the last dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge the caller's data with the preserved head/tail
		 * bytes into one aligned scratch buffer. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer holding one full flash page. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* Mark the last dword of the page -- or of the whole
			 * transfer on buffered flash -- with LAST. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3799
3800 static void
3801 bnx2_init_remote_phy(struct bnx2 *bp)
3802 {
3803         u32 val;
3804
3805         bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3806         if (!(bp->phy_flags & PHY_SERDES_FLAG))
3807                 return;
3808
3809         val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3810         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3811                 return;
3812
3813         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3814                 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3815
3816                 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3817                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3818                         bp->phy_port = PORT_FIBRE;
3819                 else
3820                         bp->phy_port = PORT_TP;
3821
3822                 if (netif_running(bp->dev)) {
3823                         u32 sig;
3824
3825                         if (val & BNX2_LINK_STATUS_LINK_UP) {
3826                                 bp->link_up = 1;
3827                                 netif_carrier_on(bp->dev);
3828                         } else {
3829                                 bp->link_up = 0;
3830                                 netif_carrier_off(bp->dev);
3831                         }
3832                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
3833                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3834                         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3835                                    sig);
3836                 }
3837         }
3838 }
3839
/* Soft-reset the chip and wait for the bootcode to reinitialize.
 *
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason forwarded to the
 * firmware via bnx2_fw_sync().  Returns 0 on success, -EBUSY if the
 * core reset never completes, -ENODEV if post-reset byte swapping is
 * misconfigured, or an error from firmware sync / bad-rbuf repair.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through MISC_COMMAND; older chips use the
		 * PCICFG core-reset request below. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Config space may be inaccessible during reset, so use
		 * a PCI config write rather than a memory-mapped one. */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY state; the reset may have changed the
	 * reported port type. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3941
/* Post-reset hardware initialization: DMA configuration, context
 * memory, on-chip CPU firmware, NVRAM probing, MAC address, MTU,
 * host-coalescing parameters and interrupt setup.
 *
 * Returns 0 on success or a negative errno from context setup, CPU
 * firmware load, or the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing thresholds: interrupt values in the upper
	 * halfword, non-interrupt values in the lower. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4119
4120 static void
4121 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4122 {
4123         u32 val, offset0, offset1, offset2, offset3;
4124
4125         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4126                 offset0 = BNX2_L2CTX_TYPE_XI;
4127                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4128                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4129                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4130         } else {
4131                 offset0 = BNX2_L2CTX_TYPE;
4132                 offset1 = BNX2_L2CTX_CMD_TYPE;
4133                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4134                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4135         }
4136         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4137         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4138
4139         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4140         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4141
4142         val = (u64) bp->tx_desc_mapping >> 32;
4143         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4144
4145         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4146         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4147 }
4148
4149 static void
4150 bnx2_init_tx_ring(struct bnx2 *bp)
4151 {
4152         struct tx_bd *txbd;
4153         u32 cid;
4154
4155         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4156
4157         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4158
4159         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4160         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4161
4162         bp->tx_prod = 0;
4163         bp->tx_cons = 0;
4164         bp->hw_tx_cons = 0;
4165         bp->tx_prod_bseq = 0;
4166
4167         cid = TX_CID;
4168         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4169         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4170
4171         bnx2_init_tx_context(bp, cid);
4172 }
4173
/* Initialize the RX BD pages and the RX hardware context, then fill
 * the ring with receive buffers.  Called from bnx2_reset_nic() after
 * the chip has been (re)initialized.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Set up each BD page; the last BD of a page chains to the next
	 * page, and the last page chains back to the first.
	 */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type/size.  NOTE(review): the meaning
	 * of the (0x02 << 8) field is not visible here — check the
	 * L2CTX layout in bnx2.h before changing it.
	 */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Give the chip the bus address of the first BD page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post up to rx_ring_size buffers; stop early on alloc failure. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4232
4233 static void
4234 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4235 {
4236         u32 num_rings, max;
4237
4238         bp->rx_ring_size = size;
4239         num_rings = 1;
4240         while (size > MAX_RX_DESC_CNT) {
4241                 size -= MAX_RX_DESC_CNT;
4242                 num_rings++;
4243         }
4244         /* round to next power of 2 */
4245         max = MAX_RX_RINGS;
4246         while ((max & num_rings) == 0)
4247                 max >>= 1;
4248
4249         if (num_rings != max)
4250                 max <<= 1;
4251
4252         bp->rx_max_ring = max;
4253         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4254 }
4255
/* Unmap and free every skb still held on the TX ring.  Called via
 * bnx2_free_skbs() after the chip has been reset (see
 * bnx2_reset_nic()), so the hardware is no longer using the BDs.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		/* Only the first BD of a packet carries the skb pointer. */
		if (skb == NULL) {
			i++;
			continue;
		}

		/* The first BD maps the linear part of the skb... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* ...and the next nr_frags BDs map the page fragments. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip over all BDs consumed by this packet. */
		i += j + 1;
	}

}
4292
4293 static void
4294 bnx2_free_rx_skbs(struct bnx2 *bp)
4295 {
4296         int i;
4297
4298         if (bp->rx_buf_ring == NULL)
4299                 return;
4300
4301         for (i = 0; i < bp->rx_max_ring_idx; i++) {
4302                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4303                 struct sk_buff *skb = rx_buf->skb;
4304
4305                 if (skb == NULL)
4306                         continue;
4307
4308                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4309                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4310
4311                 rx_buf->skb = NULL;
4312
4313                 dev_kfree_skb(skb);
4314         }
4315 }
4316
/* Release all TX and RX buffers owned by the driver.  Used when the
 * device is reset (bnx2_reset_nic) or torn down.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4323
4324 static int
4325 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4326 {
4327         int rc;
4328
4329         rc = bnx2_reset_chip(bp, reset_code);
4330         bnx2_free_skbs(bp);
4331         if (rc)
4332                 return rc;
4333
4334         if ((rc = bnx2_init_chip(bp)) != 0)
4335                 return rc;
4336
4337         bnx2_init_tx_ring(bp);
4338         bnx2_init_rx_ring(bp);
4339         return 0;
4340 }
4341
4342 static int
4343 bnx2_init_nic(struct bnx2 *bp)
4344 {
4345         int rc;
4346
4347         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4348                 return rc;
4349
4350         spin_lock_bh(&bp->phy_lock);
4351         bnx2_init_phy(bp);
4352         bnx2_set_link(bp);
4353         spin_unlock_bh(&bp->phy_lock);
4354         return 0;
4355 }
4356
/* Ethtool register self-test.  For each register in reg_tbl, verify
 * that the read/write bits (rw_mask) can be cleared and set, and
 * that the read-only bits (ro_mask) are unchanged by writes of 0 and
 * 0xffffffff.  The original register value is restored in every
 * case.  Entries flagged BNX2_FL_NOT_5709 are skipped on the 5709.
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register offset; rw_mask: bits that must be writable;
	 * ro_mask: bits that must be read-only.  Terminated by
	 * offset == 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Writing 0 must clear all rw bits and leave ro bits alone. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all 1s must set all rw bits and leave ro bits alone. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4527
4528 static int
4529 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4530 {
4531         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4532                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4533         int i;
4534
4535         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4536                 u32 offset;
4537
4538                 for (offset = 0; offset < size; offset += 4) {
4539
4540                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4541
4542                         if (REG_RD_IND(bp, start + offset) !=
4543                                 test_pattern[i]) {
4544                                 return -ENODEV;
4545                         }
4546                 }
4547         }
4548         return 0;
4549 }
4550
4551 static int
4552 bnx2_test_memory(struct bnx2 *bp)
4553 {
4554         int ret = 0;
4555         int i;
4556         static struct mem_entry {
4557                 u32   offset;
4558                 u32   len;
4559         } mem_tbl_5706[] = {
4560                 { 0x60000,  0x4000 },
4561                 { 0xa0000,  0x3000 },
4562                 { 0xe0000,  0x4000 },
4563                 { 0x120000, 0x4000 },
4564                 { 0x1a0000, 0x4000 },
4565                 { 0x160000, 0x4000 },
4566                 { 0xffffffff, 0    },
4567         },
4568         mem_tbl_5709[] = {
4569                 { 0x60000,  0x4000 },
4570                 { 0xa0000,  0x3000 },
4571                 { 0xe0000,  0x4000 },
4572                 { 0x120000, 0x4000 },
4573                 { 0x1a0000, 0x4000 },
4574                 { 0xffffffff, 0    },
4575         };
4576         struct mem_entry *mem_tbl;
4577
4578         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4579                 mem_tbl = mem_tbl_5709;
4580         else
4581                 mem_tbl = mem_tbl_5706;
4582
4583         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4584                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4585                         mem_tbl[i].len)) != 0) {
4586                         return ret;
4587                 }
4588         }
4589
4590         return ret;
4591 }
4592
4593 #define BNX2_MAC_LOOPBACK       0
4594 #define BNX2_PHY_LOOPBACK       1
4595
/* Send one self-addressed frame through the chip in MAC or PHY
 * loopback and verify that it arrives back intact on the RX ring.
 * Returns 0 on success (or for PHY loopback with a remote-managed
 * PHY, which cannot be tested), -EINVAL for an unknown mode,
 * -ENOMEM if the test skb cannot be allocated, and -ENODEV on any
 * completion/data mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* Cannot drive the PHY directly when it is remotely managed. */
		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: DA = our MAC, zeroed SA/type bytes,
	 * and a counting byte pattern that is verified on receive.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update to get a stable RX start index. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Queue the frame as a single-BD packet and ring the TX doorbell. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up the completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have consumed exactly the packet we queued... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and RX must have received exactly num_pkts frames. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The chip-written l2_fhdr precedes the frame data in the buffer. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* The reported length appears to include the 4-byte FCS —
	 * hence the -4 before comparing against what we sent.
	 */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting pattern byte-for-byte past the header. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4717
4718 #define BNX2_MAC_LOOPBACK_FAILED        1
4719 #define BNX2_PHY_LOOPBACK_FAILED        2
4720 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4721                                          BNX2_PHY_LOOPBACK_FAILED)
4722
4723 static int
4724 bnx2_test_loopback(struct bnx2 *bp)
4725 {
4726         int rc = 0;
4727
4728         if (!netif_running(bp->dev))
4729                 return BNX2_LOOPBACK_FAILED;
4730
4731         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4732         spin_lock_bh(&bp->phy_lock);
4733         bnx2_init_phy(bp);
4734         spin_unlock_bh(&bp->phy_lock);
4735         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4736                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4737         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4738                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4739         return rc;
4740 }
4741
4742 #define NVRAM_SIZE 0x200
4743 #define CRC32_RESIDUAL 0xdebb20e3
4744
4745 static int
4746 bnx2_test_nvram(struct bnx2 *bp)
4747 {
4748         u32 buf[NVRAM_SIZE / 4];
4749         u8 *data = (u8 *) buf;
4750         int rc = 0;
4751         u32 magic, csum;
4752
4753         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4754                 goto test_nvram_done;
4755
4756         magic = be32_to_cpu(buf[0]);
4757         if (magic != 0x669955aa) {
4758                 rc = -ENODEV;
4759                 goto test_nvram_done;
4760         }
4761
4762         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4763                 goto test_nvram_done;
4764
4765         csum = ether_crc_le(0x100, data);
4766         if (csum != CRC32_RESIDUAL) {
4767                 rc = -ENODEV;
4768                 goto test_nvram_done;
4769         }
4770
4771         csum = ether_crc_le(0x100, data + 0x100);
4772         if (csum != CRC32_RESIDUAL) {
4773                 rc = -ENODEV;
4774         }
4775
4776 test_nvram_done:
4777         return rc;
4778 }
4779
4780 static int
4781 bnx2_test_link(struct bnx2 *bp)
4782 {
4783         u32 bmsr;
4784
4785         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4786                 if (bp->link_up)
4787                         return 0;
4788                 return -ENODEV;
4789         }
4790         spin_lock_bh(&bp->phy_lock);
4791         bnx2_enable_bmsr1(bp);
4792         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4793         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4794         bnx2_disable_bmsr1(bp);
4795         spin_unlock_bh(&bp->phy_lock);
4796
4797         if (bmsr & BMSR_LSTATUS) {
4798                 return 0;
4799         }
4800         return -ENODEV;
4801 }
4802
4803 static int
4804 bnx2_test_intr(struct bnx2 *bp)
4805 {
4806         int i;
4807         u16 status_idx;
4808
4809         if (!netif_running(bp->dev))
4810                 return -ENODEV;
4811
4812         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4813
4814         /* This register is not touched during run-time. */
4815         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4816         REG_RD(bp, BNX2_HC_COMMAND);
4817
4818         for (i = 0; i < 10; i++) {
4819                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4820                         status_idx) {
4821
4822                         break;
4823                 }
4824
4825                 msleep_interruptible(10);
4826         }
4827         if (i < 10)
4828                 return 0;
4829
4830         return -ENODEV;
4831 }
4832
/* 5706 SerDes timer tick (runs from bnx2_timer()): implements a
 * parallel-detect fallback.  While autoneg is enabled but the link
 * is down, if the PHY shows signal detect without autoneg config
 * exchange, force 1000 Mbps full duplex.  Once forced, revert to
 * autoneg as soon as config words are seen again.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started autoneg attempt. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* NOTE(review): registers 0x1c/0x17/0x15 look like
			 * vendor shadow/expansion accesses — consult the PHY
			 * datasheet before changing these sequences.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * 1G full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link came up in forced mode; if the partner now sends
		 * config words, go back to autoneg.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4887
/* 5708 SerDes timer tick (runs from bnx2_timer()): while the link is
 * down with autoneg enabled, alternate between forced 2.5G and
 * autoneg so we can connect to partners that do not autonegotiate.
 * No-op for remote-managed PHYs and for PHYs without 2.5G support.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give the current autoneg attempt more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Try forced 2.5G next, with a shorter timeout. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Fall back to autoneg and wait two ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4920
/* Periodic driver timer (re-arms itself every bp->current_interval
 * jiffies while the device is running).  Sends the firmware
 * heartbeat, refreshes the firmware RX drop counter in the stats
 * block, applies a 5708 statistics workaround, and drives the SerDes
 * link state machines.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are masked (e.g. during reset); just re-arm. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4951
4952 static int
4953 bnx2_request_irq(struct bnx2 *bp)
4954 {
4955         struct net_device *dev = bp->dev;
4956         int rc = 0;
4957
4958         if (bp->flags & USING_MSI_FLAG) {
4959                 irq_handler_t   fn = bnx2_msi;
4960
4961                 if (bp->flags & ONE_SHOT_MSI_FLAG)
4962                         fn = bnx2_msi_1shot;
4963
4964                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4965         } else
4966                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4967                                  IRQF_SHARED, dev->name, dev);
4968         return rc;
4969 }
4970
4971 static void
4972 bnx2_free_irq(struct bnx2 *bp)
4973 {
4974         struct net_device *dev = bp->dev;
4975
4976         if (bp->flags & USING_MSI_FLAG) {
4977                 free_irq(bp->pdev->irq, dev);
4978                 pci_disable_msi(bp->pdev);
4979                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4980         } else
4981                 free_irq(bp->pdev->irq, dev);
4982 }
4983
/* Called with rtnl_lock */
/* net_device open entry point: power the device up, allocate ring
 * and status/stats memory, set up the interrupt (preferring MSI),
 * initialize the NIC, and start the timer and TX queue.  If MSI
 * fails its delivery self-test, the device is re-initialized in
 * INTx mode.  Returns 0 on success or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	napi_enable(&bp->napi);

	/* Prefer MSI when the device supports it and it is not
	 * administratively disabled; the 5709 additionally uses
	 * one-shot MSI mode.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			/* bnx2_free_irq() clears the MSI flags, so the
			 * re-init and re-request below use INTx.
			 */
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				napi_disable(&bp->napi);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5070
/* Work-queue handler that fully reinitializes the NIC after a failure
 * (scheduled from bnx2_tx_timeout()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* The device may have been closed between scheduling and running. */
	if (!netif_running(bp->dev))
		return;

	/* in_reset_task is polled by bnx2_close(), which cannot use
	 * flush_scheduled_work() while holding rtnl_lock; keep the flag set
	 * for the whole reset so close waits us out.
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set to 1 here (vs. 0 in bnx2_open());
	 * presumably bnx2_netif_start() brings it back down when it
	 * re-enables interrupts -- confirm against that helper.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5088
5089 static void
5090 bnx2_tx_timeout(struct net_device *dev)
5091 {
5092         struct bnx2 *bp = netdev_priv(dev);
5093
5094         /* This allows the netif to be shutdown gracefully before resetting */
5095         schedule_work(&bp->reset_task);
5096 }
5097
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce RX/TX so the fast path never sees a half-updated
	 * VLAN group pointer.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	/* Reprogram the RX mode, which depends on whether VLAN is active. */
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5113
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: maps the skb (head + page frags) and posts one
 * tx_bd per DMA segment, then rings the doorbell registers.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring got this
	 * full; seeing this indicates a wake/stop accounting bug.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	/* Build the per-packet flags word (checksum, VLAN, LSO). */
	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCPv6 LSO: encode the (non-standard) TCP header
			 * offset into the flags/mss fields as scattered
			 * bit fields expected by the hardware.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TCPv4 LSO: the headers are rewritten below, so a
			 * cloned header block must be made private first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed the IP/TCP headers with per-segment values;
			 * the chip fixes up each produced segment.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* IP and/or TCP options: tell the chip the extra
			 * header length in 32-bit words.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map and post the linear part as the START descriptor. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	/* One additional descriptor per page fragment. */
	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last descriptor of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index then byte-sequence register. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Order the MMIO writes before releasing netif_tx_lock. */
	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue if the next worst-case packet would not fit;
	 * re-wake immediately if bnx2_tx_int() freed enough in between.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5252
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	/* Stop interrupts, polling and the maintenance timer before
	 * touching the hardware.
	 */
	bnx2_disable_int_sync(bp);
	napi_disable(&bp->napi);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how we are going down so it can arm (or not
	 * arm) wake-on-LAN accordingly.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5285
/* Read a 64-bit hardware statistics counter that the chip exposes as
 * separate _hi/_lo 32-bit words.  The expansion is fully parenthesized so
 * it is safe inside larger expressions (e.g. multiplied or summed with
 * other counters) -- the previous form left the '+' exposed.
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

/* On 32-bit hosts unsigned long can only hold the low word. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5298
/* Map the firmware statistics block onto the generic netdev statistics.
 * Counter reads are not atomic with respect to firmware updates; values
 * are best-effort snapshots.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Statistics DMA block not allocated yet (device never opened):
	 * return the zeroed/stale struct rather than dereferencing NULL.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* GET_NET_STATS truncates to the low 32 bits on 32-bit hosts. */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual categories above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are forced to 0 on 5706 and 5708 A0;
	 * presumably the counter is unreliable on those chips -- confirm
	 * against the chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5374
/* All ethtool functions called with rtnl_lock */

/* Report the supported/advertised link modes and the current link state.
 * Fields derived from live PHY state are read under phy_lock.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote-PHY-capable board can be either media type; otherwise
	 * the media is fixed by the current phy_port.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* Snapshot the volatile PHY-derived fields atomically. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up; -1 is the
	 * conventional "unknown" value for ethtool.
	 */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
5435
/* Validate and apply ethtool link settings.  The requested configuration
 * is staged in locals so nothing in bp is modified unless every check
 * passes; the whole operation runs under phy_lock.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media is only possible with a remote-PHY board. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 are copper-only speeds. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable serdes PHY. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			/* 1000 half-duplex is never supported. */
			goto err_out_unlock;
		else {
			/* Anything else: fall back to advertising all
			 * speeds valid for the selected medium.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only supports 1000/2500 full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Gigabit speeds cannot be forced on copper. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit the staged configuration. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5518
5519 static void
5520 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5521 {
5522         struct bnx2 *bp = netdev_priv(dev);
5523
5524         strcpy(info->driver, DRV_MODULE_NAME);
5525         strcpy(info->version, DRV_MODULE_VERSION);
5526         strcpy(info->bus_info, pci_name(bp->pdev));
5527         strcpy(info->fw_version, bp->fw_version);
5528 }
5529
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	/* The dump buffer is a fixed size regardless of chip variant. */
	return BNX2_REGDUMP_LEN;
}
5537
5538 static void
5539 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5540 {
5541         u32 *p = _p, i, offset;
5542         u8 *orig_p = _p;
5543         struct bnx2 *bp = netdev_priv(dev);
5544         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5545                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5546                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5547                                  0x1040, 0x1048, 0x1080, 0x10a4,
5548                                  0x1400, 0x1490, 0x1498, 0x14f0,
5549                                  0x1500, 0x155c, 0x1580, 0x15dc,
5550                                  0x1600, 0x1658, 0x1680, 0x16d8,
5551                                  0x1800, 0x1820, 0x1840, 0x1854,
5552                                  0x1880, 0x1894, 0x1900, 0x1984,
5553                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5554                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5555                                  0x2000, 0x2030, 0x23c0, 0x2400,
5556                                  0x2800, 0x2820, 0x2830, 0x2850,
5557                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5558                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5559                                  0x4080, 0x4090, 0x43c0, 0x4458,
5560                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5561                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5562                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5563                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5564                                  0x6800, 0x6848, 0x684c, 0x6860,
5565                                  0x6888, 0x6910, 0x8000 };
5566
5567         regs->version = 0;
5568
5569         memset(p, 0, BNX2_REGDUMP_LEN);
5570
5571         if (!netif_running(bp->dev))
5572                 return;
5573
5574         i = 0;
5575         offset = reg_boundaries[0];
5576         p += offset;
5577         while (offset < BNX2_REGDUMP_LEN) {
5578                 *p++ = REG_RD(bp, offset);
5579                 offset += 4;
5580                 if (offset == reg_boundaries[i + 1]) {
5581                         offset = reg_boundaries[i + 2];
5582                         p = (u32 *) (orig_p + offset);
5583                         i += 2;
5584                 }
5585         }
5586 }
5587
5588 static void
5589 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5590 {
5591         struct bnx2 *bp = netdev_priv(dev);
5592
5593         if (bp->flags & NO_WOL_FLAG) {
5594                 wol->supported = 0;
5595                 wol->wolopts = 0;
5596         }
5597         else {
5598                 wol->supported = WAKE_MAGIC;
5599                 if (bp->wol)
5600                         wol->wolopts = WAKE_MAGIC;
5601                 else
5602                         wol->wolopts = 0;
5603         }
5604         memset(&wol->sopass, 0, sizeof(wol->sopass));
5605 }
5606
5607 static int
5608 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5609 {
5610         struct bnx2 *bp = netdev_priv(dev);
5611
5612         if (wol->wolopts & ~WAKE_MAGIC)
5613                 return -EINVAL;
5614
5615         if (wol->wolopts & WAKE_MAGIC) {
5616                 if (bp->flags & NO_WOL_FLAG)
5617                         return -EINVAL;
5618
5619                 bp->wol = 1;
5620         }
5621         else {
5622                 bp->wol = 0;
5623         }
5624         return 0;
5625 }
5626
/* ethtool -r: restart autonegotiation.  Only valid when autoneg is
 * enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* With a remote PHY the firmware handles the renegotiation. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock around the sleep; BH-disabled context must
		 * not block.  PHY state may change in the window.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg watchdog in the driver timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear the temporary loopback and kick off autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5669
5670 static int
5671 bnx2_get_eeprom_len(struct net_device *dev)
5672 {
5673         struct bnx2 *bp = netdev_priv(dev);
5674
5675         if (bp->flash_info == NULL)
5676                 return 0;
5677
5678         return (int) bp->flash_size;
5679 }
5680
5681 static int
5682 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5683                 u8 *eebuf)
5684 {
5685         struct bnx2 *bp = netdev_priv(dev);
5686         int rc;
5687
5688         /* parameters already validated in ethtool_get_eeprom */
5689
5690         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5691
5692         return rc;
5693 }
5694
5695 static int
5696 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5697                 u8 *eebuf)
5698 {
5699         struct bnx2 *bp = netdev_priv(dev);
5700         int rc;
5701
5702         /* parameters already validated in ethtool_set_eeprom */
5703
5704         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5705
5706         return rc;
5707 }
5708
5709 static int
5710 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5711 {
5712         struct bnx2 *bp = netdev_priv(dev);
5713
5714         memset(coal, 0, sizeof(struct ethtool_coalesce));
5715
5716         coal->rx_coalesce_usecs = bp->rx_ticks;
5717         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5718         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5719         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5720
5721         coal->tx_coalesce_usecs = bp->tx_ticks;
5722         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5723         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5724         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5725
5726         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5727
5728         return 0;
5729 }
5730
5731 static int
5732 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5733 {
5734         struct bnx2 *bp = netdev_priv(dev);
5735
5736         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5737         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5738
5739         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5740         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5741
5742         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5743         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5744
5745         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5746         if (bp->rx_quick_cons_trip_int > 0xff)
5747                 bp->rx_quick_cons_trip_int = 0xff;
5748
5749         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5750         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5751
5752         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5753         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5754
5755         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5756         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5757
5758         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5759         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5760                 0xff;
5761
5762         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5763         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5764                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5765                         bp->stats_ticks = USEC_PER_SEC;
5766         }
5767         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5768                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5769         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5770
5771         if (netif_running(bp->dev)) {
5772                 bnx2_netif_stop(bp);
5773                 bnx2_init_nic(bp);
5774                 bnx2_netif_start(bp);
5775         }
5776
5777         return 0;
5778 }
5779
5780 static void
5781 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5782 {
5783         struct bnx2 *bp = netdev_priv(dev);
5784
5785         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5786         ering->rx_mini_max_pending = 0;
5787         ering->rx_jumbo_max_pending = 0;
5788
5789         ering->rx_pending = bp->rx_ring_size;
5790         ering->rx_mini_pending = 0;
5791         ering->rx_jumbo_pending = 0;
5792
5793         ering->tx_max_pending = MAX_TX_DESC_CNT;
5794         ering->tx_pending = bp->tx_ring_size;
5795 }
5796
/* ethtool -G: resize the RX/TX rings.  If the interface is up, the
 * device is torn down, the rings reallocated, and the device restarted.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX must hold at least one worst-case fragmented packet. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		/* NOTE(review): on allocation failure the interface is left
		 * stopped with no rings while still marked running; a
		 * recovery path (e.g. dev_close) looks needed -- confirm.
		 */
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5830
5831 static void
5832 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5833 {
5834         struct bnx2 *bp = netdev_priv(dev);
5835
5836         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5837         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5838         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5839 }
5840
5841 static int
5842 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5843 {
5844         struct bnx2 *bp = netdev_priv(dev);
5845
5846         bp->req_flow_ctrl = 0;
5847         if (epause->rx_pause)
5848                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5849         if (epause->tx_pause)
5850                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5851
5852         if (epause->autoneg) {
5853                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5854         }
5855         else {
5856                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5857         }
5858
5859         spin_lock_bh(&bp->phy_lock);
5860
5861         bnx2_setup_phy(bp, bp->phy_port);
5862
5863         spin_unlock_bh(&bp->phy_lock);
5864
5865         return 0;
5866 }
5867
5868 static u32
5869 bnx2_get_rx_csum(struct net_device *dev)
5870 {
5871         struct bnx2 *bp = netdev_priv(dev);
5872
5873         return bp->rx_csum;
5874 }
5875
5876 static int
5877 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5878 {
5879         struct bnx2 *bp = netdev_priv(dev);
5880
5881         bp->rx_csum = data;
5882         return 0;
5883 }
5884
5885 static int
5886 bnx2_set_tso(struct net_device *dev, u32 data)
5887 {
5888         struct bnx2 *bp = netdev_priv(dev);
5889
5890         if (data) {
5891                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5892                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5893                         dev->features |= NETIF_F_TSO6;
5894         } else
5895                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5896                                    NETIF_F_TSO_ECN);
5897         return 0;
5898 }
5899
#define BNX2_NUM_STATS 46

/* Counter names reported via ethtool -S.  Entry order must stay in
 * lockstep with bnx2_stats_offset_arr and the per-chip stats length
 * tables below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
5952
/* Offset, in 32-bit words, of each counter within struct
 * statistics_block.  Entry order must match bnx2_stats_str_arr.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6003
/* Per-counter width table for 5706 A0-A2 and 5708 A0, indexed like
 * bnx2_stats_str_arr: 8 = 64-bit counter, 4 = 32-bit counter,
 * 0 = counter skipped.  stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6014
/* Per-counter width table for all other chips: 8 = 64-bit counter,
 * 4 = 32-bit counter, 0 = skipped (only stat_IfHCInBadOctets here).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,
};
6022
#define BNX2_NUM_TESTS 6

/* Names of the self tests, in the order bnx2_self_test() fills buf[].
 * "(offline)" tests only run when ETH_TEST_FL_OFFLINE is requested.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
6035
6036 static int
6037 bnx2_get_sset_count(struct net_device *dev, int sset)
6038 {
6039         switch (sset) {
6040         case ETH_SS_TEST:
6041                 return BNX2_NUM_TESTS;
6042         case ETH_SS_STATS:
6043                 return BNX2_NUM_STATS;
6044         default:
6045                 return -EOPNOTSUPP;
6046         }
6047 }
6048
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* buf[] entries correspond, in order, to bnx2_tests_str_arr;
         * a non-zero entry marks that test as failed.
         */
        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Offline tests need exclusive use of the chip: stop
                 * traffic, put the chip in diagnostic mode, and release
                 * all posted buffers.
                 */
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* Loopback result is reported directly in buf[2]. */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Leave diagnostic mode: full reset if the interface is
                 * down, otherwise re-initialize and restart traffic.
                 */
                if (!netif_running(bp->dev)) {
                        bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                }
                else {
                        bnx2_init_nic(bp);
                        bnx2_netif_start(bp);
                }

                /* wait up to 7 seconds for the link to come back up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        /* Online tests run regardless of the offline flag. */
        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
}
6104
6105 static void
6106 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6107 {
6108         switch (stringset) {
6109         case ETH_SS_STATS:
6110                 memcpy(buf, bnx2_stats_str_arr,
6111                         sizeof(bnx2_stats_str_arr));
6112                 break;
6113         case ETH_SS_TEST:
6114                 memcpy(buf, bnx2_tests_str_arr,
6115                         sizeof(bnx2_tests_str_arr));
6116                 break;
6117         }
6118 }
6119
6120 static void
6121 bnx2_get_ethtool_stats(struct net_device *dev,
6122                 struct ethtool_stats *stats, u64 *buf)
6123 {
6124         struct bnx2 *bp = netdev_priv(dev);
6125         int i;
6126         u32 *hw_stats = (u32 *) bp->stats_blk;
6127         u8 *stats_len_arr = NULL;
6128
6129         if (hw_stats == NULL) {
6130                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6131                 return;
6132         }
6133
6134         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6135             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6136             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6137             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6138                 stats_len_arr = bnx2_5706_stats_len_arr;
6139         else
6140                 stats_len_arr = bnx2_5708_stats_len_arr;
6141
6142         for (i = 0; i < BNX2_NUM_STATS; i++) {
6143                 if (stats_len_arr[i] == 0) {
6144                         /* skip this counter */
6145                         buf[i] = 0;
6146                         continue;
6147                 }
6148                 if (stats_len_arr[i] == 4) {
6149                         /* 4-byte counter */
6150                         buf[i] = (u64)
6151                                 *(hw_stats + bnx2_stats_offset_arr[i]);
6152                         continue;
6153                 }
6154                 /* 8-byte counter */
6155                 buf[i] = (((u64) *(hw_stats +
6156                                         bnx2_stats_offset_arr[i])) << 32) +
6157                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6158         }
6159 }
6160
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 save;

        /* ethtool -p handler: toggle the port LED every 500 ms for
         * `data` seconds (2 seconds when data == 0) so the adapter can
         * be located physically.
         */
        if (data == 0)
                data = 2;

        /* Save the LED mode so it can be restored when done. */
        save = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0) {
                        /* Even half-cycle: override with no indicator
                         * bits forced.
                         */
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                }
                else {
                        /* Odd half-cycle: force all speed and traffic
                         * indicator bits on.
                         */
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                                BNX2_EMAC_LED_1000MB_OVERRIDE |
                                BNX2_EMAC_LED_100MB_OVERRIDE |
                                BNX2_EMAC_LED_10MB_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC);
                }
                msleep_interruptible(500);
                /* Bail out early if the user interrupted the sleep. */
                if (signal_pending(current))
                        break;
        }
        /* Drop the override and restore the original LED mode. */
        REG_WR(bp, BNX2_EMAC_LED, 0);
        REG_WR(bp, BNX2_MISC_CFG, save);
        return 0;
}
6194
6195 static int
6196 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6197 {
6198         struct bnx2 *bp = netdev_priv(dev);
6199
6200         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6201                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6202         else
6203                 return (ethtool_op_set_tx_csum(dev, data));
6204 }
6205
/* ethtool entry points for the bnx2 driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
6236
/* MII register access ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock held.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct bnx2 *bp = netdev_priv(dev);
        int err;

        switch(cmd) {
        case SIOCGMIIPHY:
                /* Report the PHY address, then fall through to read a
                 * register as SIOCGMIIREG does.
                 */
                data->phy_id = bp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                /* Direct MII access is unavailable when the PHY is
                 * controlled by remote firmware.
                 */
                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                /* phy_lock serializes PHY register accesses. */
                spin_lock_bh(&bp->phy_lock);
                err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&bp->phy_lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                /* Writes require privilege. */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return -EOPNOTSUPP;

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&bp->phy_lock);
                err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&bp->phy_lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}
6290
6291 /* Called with rtnl_lock */
6292 static int
6293 bnx2_change_mac_addr(struct net_device *dev, void *p)
6294 {
6295         struct sockaddr *addr = p;
6296         struct bnx2 *bp = netdev_priv(dev);
6297
6298         if (!is_valid_ether_addr(addr->sa_data))
6299                 return -EINVAL;
6300
6301         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6302         if (netif_running(dev))
6303                 bnx2_set_mac_addr(bp);
6304
6305         return 0;
6306 }
6307
6308 /* Called with rtnl_lock */
6309 static int
6310 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6311 {
6312         struct bnx2 *bp = netdev_priv(dev);
6313
6314         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6315                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6316                 return -EINVAL;
6317
6318         dev->mtu = new_mtu;
6319         if (netif_running(dev)) {
6320                 bnx2_netif_stop(bp);
6321
6322                 bnx2_init_nic(bp);
6323
6324                 bnx2_netif_start(bp);
6325         }
6326         return 0;
6327 }
6328
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: invoke the interrupt handler by hand with the
 * device's IRQ masked so it cannot re-enter concurrently.
 */
static void
poll_bnx2(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        disable_irq(bp->pdev->irq);
        bnx2_interrupt(bp->pdev->irq, dev);
        enable_irq(bp->pdev->irq);
}
#endif
6340
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
        /* Decide whether this 5709 port uses copper or SerDes media and
         * set PHY_SERDES_FLAG accordingly.
         */
        u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
        u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
        u32 strap;

        /* Single-media bond ids: "C" parts are copper-only (leave flags
         * alone), "S" parts are SerDes-only.
         */
        if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
                return;
        else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
                bp->phy_flags |= PHY_SERDES_FLAG;
                return;
        }

        /* Dual-media part: read the media strap, preferring a software
         * override when one is present.
         */
        if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
        else
                strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

        /* NOTE(review): the strap-value-to-media mapping differs per PCI
         * function; the magic values below are taken as-is — confirm
         * against the 5709 hardware documentation.
         */
        if (PCI_FUNC(bp->pdev->devfn) == 0) {
                switch (strap) {
                case 0x4:
                case 0x5:
                case 0x6:
                        bp->phy_flags |= PHY_SERDES_FLAG;
                        return;
                }
        } else {
                switch (strap) {
                case 0x1:
                case 0x2:
                case 0x4:
                        bp->phy_flags |= PHY_SERDES_FLAG;
                        return;
                }
        }
}
6378
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
        u32 reg;

        /* Detect the bus type (PCI vs. PCI-X) and nominal clock speed
         * from the PCICFG misc status register, recording the results
         * in bp->flags and bp->bus_speed_mhz.
         */
        reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
        if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
                u32 clkreg;

                bp->flags |= PCIX_FLAG;

                clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

                /* Map the detected clock code onto a nominal MHz value;
                 * unknown codes leave bus_speed_mhz unchanged.
                 */
                clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
                switch (clkreg) {
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
                        bp->bus_speed_mhz = 133;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
                        bp->bus_speed_mhz = 100;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
                        bp->bus_speed_mhz = 66;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
                        bp->bus_speed_mhz = 50;
                        break;

                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
                case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
                        bp->bus_speed_mhz = 33;
                        break;
                }
        }
        else {
                /* Conventional PCI: only 33 vs. 66 MHz can be told apart
                 * via the M66EN bit.
                 */
                if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
                        bp->bus_speed_mhz = 66;
                else
                        bp->bus_speed_mhz = 33;
        }

        if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
                bp->flags |= PCI_32BIT_FLAG;

}
6430
6431 static int __devinit
6432 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6433 {
6434         struct bnx2 *bp;
6435         unsigned long mem_len;
6436         int rc, i, j;
6437         u32 reg;
6438         u64 dma_mask, persist_dma_mask;
6439
6440         SET_NETDEV_DEV(dev, &pdev->dev);
6441         bp = netdev_priv(dev);
6442
6443         bp->flags = 0;
6444         bp->phy_flags = 0;
6445
6446         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6447         rc = pci_enable_device(pdev);
6448         if (rc) {
6449                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6450                 goto err_out;
6451         }
6452
6453         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6454                 dev_err(&pdev->dev,
6455                         "Cannot find PCI device base address, aborting.\n");
6456                 rc = -ENODEV;
6457                 goto err_out_disable;
6458         }
6459
6460         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6461         if (rc) {
6462                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6463                 goto err_out_disable;
6464         }
6465
6466         pci_set_master(pdev);
6467
6468         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6469         if (bp->pm_cap == 0) {
6470                 dev_err(&pdev->dev,
6471                         "Cannot find power management capability, aborting.\n");
6472                 rc = -EIO;
6473                 goto err_out_release;
6474         }
6475
6476         bp->dev = dev;
6477         bp->pdev = pdev;
6478
6479         spin_lock_init(&bp->phy_lock);
6480         spin_lock_init(&bp->indirect_lock);
6481         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6482
6483         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6484         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6485         dev->mem_end = dev->mem_start + mem_len;
6486         dev->irq = pdev->irq;
6487
6488         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6489
6490         if (!bp->regview) {
6491                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6492                 rc = -ENOMEM;
6493                 goto err_out_release;
6494         }
6495
6496         /* Configure byte swap and enable write to the reg_window registers.
6497          * Rely on CPU to do target byte swapping on big endian systems
6498          * The chip's target access swapping will not swap all accesses
6499          */
6500         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6501                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6502                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6503
6504         bnx2_set_power_state(bp, PCI_D0);
6505
6506         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6507
6508         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6509                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6510                         dev_err(&pdev->dev,
6511                                 "Cannot find PCIE capability, aborting.\n");
6512                         rc = -EIO;
6513                         goto err_out_unmap;
6514                 }
6515                 bp->flags |= PCIE_FLAG;
6516         } else {
6517                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6518                 if (bp->pcix_cap == 0) {
6519                         dev_err(&pdev->dev,
6520                                 "Cannot find PCIX capability, aborting.\n");
6521                         rc = -EIO;
6522                         goto err_out_unmap;
6523                 }
6524         }
6525
6526         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6527                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6528                         bp->flags |= MSI_CAP_FLAG;
6529         }
6530
6531         /* 5708 cannot support DMA addresses > 40-bit.  */
6532         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6533                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6534         else
6535                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6536
6537         /* Configure DMA attributes. */
6538         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6539                 dev->features |= NETIF_F_HIGHDMA;
6540                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6541                 if (rc) {
6542                         dev_err(&pdev->dev,
6543                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6544                         goto err_out_unmap;
6545                 }
6546         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6547                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6548                 goto err_out_unmap;
6549         }
6550
6551         if (!(bp->flags & PCIE_FLAG))
6552                 bnx2_get_pci_speed(bp);
6553
6554         /* 5706A0 may falsely detect SERR and PERR. */
6555         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6556                 reg = REG_RD(bp, PCI_COMMAND);
6557                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6558                 REG_WR(bp, PCI_COMMAND, reg);
6559         }
6560         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6561                 !(bp->flags & PCIX_FLAG)) {
6562
6563                 dev_err(&pdev->dev,
6564                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6565                 goto err_out_unmap;
6566         }
6567
6568         bnx2_init_nvram(bp);
6569
6570         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6571
6572         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6573             BNX2_SHM_HDR_SIGNATURE_SIG) {
6574                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6575
6576                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6577         } else
6578                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6579
6580         /* Get the permanent MAC address.  First we need to make sure the
6581          * firmware is actually running.
6582          */
6583         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6584
6585         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6586             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6587                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6588                 rc = -ENODEV;
6589                 goto err_out_unmap;
6590         }
6591
6592         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6593         for (i = 0, j = 0; i < 3; i++) {
6594                 u8 num, k, skip0;
6595
6596                 num = (u8) (reg >> (24 - (i * 8)));
6597                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6598                         if (num >= k || !skip0 || k == 1) {
6599                                 bp->fw_version[j++] = (num / k) + '0';
6600                                 skip0 = 0;
6601                         }
6602                 }
6603                 if (i != 2)
6604                         bp->fw_version[j++] = '.';
6605         }
6606         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6607         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6608                 bp->wol = 1;
6609
6610         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6611                 bp->flags |= ASF_ENABLE_FLAG;
6612
6613                 for (i = 0; i < 30; i++) {
6614                         reg = REG_RD_IND(bp, bp->shmem_base +
6615                                              BNX2_BC_STATE_CONDITION);
6616                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6617                                 break;
6618                         msleep(10);
6619                 }
6620         }
6621         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6622         reg &= BNX2_CONDITION_MFW_RUN_MASK;
6623         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6624             reg != BNX2_CONDITION_MFW_RUN_NONE) {
6625                 int i;
6626                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6627
6628                 bp->fw_version[j++] = ' ';
6629                 for (i = 0; i < 3; i++) {
6630                         reg = REG_RD_IND(bp, addr + i * 4);
6631                         reg = swab32(reg);
6632                         memcpy(&bp->fw_version[j], &reg, 4);
6633                         j += 4;
6634                 }
6635         }
6636
6637         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6638         bp->mac_addr[0] = (u8) (reg >> 8);
6639         bp->mac_addr[1] = (u8) reg;
6640
6641         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6642         bp->mac_addr[2] = (u8) (reg >> 24);
6643         bp->mac_addr[3] = (u8) (reg >> 16);
6644         bp->mac_addr[4] = (u8) (reg >> 8);
6645         bp->mac_addr[5] = (u8) reg;
6646
6647         bp->tx_ring_size = MAX_TX_DESC_CNT;
6648         bnx2_set_rx_ring_size(bp, 255);
6649
6650         bp->rx_csum = 1;
6651
6652         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6653
6654         bp->tx_quick_cons_trip_int = 20;
6655         bp->tx_quick_cons_trip = 20;
6656         bp->tx_ticks_int = 80;
6657         bp->tx_ticks = 80;
6658
6659         bp->rx_quick_cons_trip_int = 6;
6660         bp->rx_quick_cons_trip = 6;
6661         bp->rx_ticks_int = 18;
6662         bp->rx_ticks = 18;
6663
6664         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6665
6666         bp->timer_interval =  HZ;
6667         bp->current_interval =  HZ;
6668
6669         bp->phy_addr = 1;
6670
6671         /* Disable WOL support if we are running on a SERDES chip. */
6672         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6673                 bnx2_get_5709_media(bp);
6674         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6675                 bp->phy_flags |= PHY_SERDES_FLAG;
6676
6677         bp->phy_port = PORT_TP;
6678         if (bp->phy_flags & PHY_SERDES_FLAG) {
6679                 bp->phy_port = PORT_FIBRE;
6680                 reg = REG_RD_IND(bp, bp->shmem_base +
6681                                      BNX2_SHARED_HW_CFG_CONFIG);
6682                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6683                         bp->flags |= NO_WOL_FLAG;
6684                         bp->wol = 0;
6685                 }
6686                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6687                         bp->phy_addr = 2;
6688                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6689                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6690                 }
6691                 bnx2_init_remote_phy(bp);
6692
6693         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6694                    CHIP_NUM(bp) == CHIP_NUM_5708)
6695                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6696         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6697                  (CHIP_REV(bp) == CHIP_REV_Ax ||
6698                   CHIP_REV(bp) == CHIP_REV_Bx))
6699                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6700
6701         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6702             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6703             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6704                 bp->flags |= NO_WOL_FLAG;
6705                 bp->wol = 0;
6706         }
6707
6708         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6709                 bp->tx_quick_cons_trip_int =
6710                         bp->tx_quick_cons_trip;
6711                 bp->tx_ticks_int = bp->tx_ticks;
6712                 bp->rx_quick_cons_trip_int =
6713                         bp->rx_quick_cons_trip;
6714                 bp->rx_ticks_int = bp->rx_ticks;
6715                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6716                 bp->com_ticks_int = bp->com_ticks;
6717                 bp->cmd_ticks_int = bp->cmd_ticks;
6718         }
6719
6720         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6721          *
6722          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6723          * with byte enables disabled on the unused 32-bit word.  This is legal
6724          * but causes problems on the AMD 8132 which will eventually stop
6725          * responding after a while.
6726          *
6727          * AMD believes this incompatibility is unique to the 5706, and
6728          * prefers to locally disable MSI rather than globally disabling it.
6729          */
6730         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6731                 struct pci_dev *amd_8132 = NULL;
6732
6733                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6734                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6735                                                   amd_8132))) {
6736
6737                         if (amd_8132->revision >= 0x10 &&
6738                             amd_8132->revision <= 0x13) {
6739                                 disable_msi = 1;
6740                                 pci_dev_put(amd_8132);
6741                                 break;
6742                         }
6743                 }
6744         }
6745
6746         bnx2_set_default_link(bp);
6747         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6748
6749         init_timer(&bp->timer);
6750         bp->timer.expires = RUN_AT(bp->timer_interval);
6751         bp->timer.data = (unsigned long) bp;
6752         bp->timer.function = bnx2_timer;
6753
6754         return 0;
6755
6756 err_out_unmap:
6757         if (bp->regview) {
6758                 iounmap(bp->regview);
6759                 bp->regview = NULL;
6760         }
6761
6762 err_out_release:
6763         pci_release_regions(pdev);
6764
6765 err_out_disable:
6766         pci_disable_device(pdev);
6767         pci_set_drvdata(pdev, NULL);
6768
6769 err_out:
6770         return rc;
6771 }
6772
6773 static char * __devinit
6774 bnx2_bus_string(struct bnx2 *bp, char *str)
6775 {
6776         char *s = str;
6777
6778         if (bp->flags & PCIE_FLAG) {
6779                 s += sprintf(s, "PCI Express");
6780         } else {
6781                 s += sprintf(s, "PCI");
6782                 if (bp->flags & PCIX_FLAG)
6783                         s += sprintf(s, "-X");
6784                 if (bp->flags & PCI_32BIT_FLAG)
6785                         s += sprintf(s, " 32-bit");
6786                 else
6787                         s += sprintf(s, " 64-bit");
6788                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6789         }
6790         return str;
6791 }
6792
/* PCI probe callback: allocate the net_device, initialize the board via
 * bnx2_init_board(), wire up the net_device operations and feature flags,
 * and register the interface with the network stack.  Returns 0 on
 * success or a negative errno; on failure all acquired resources are
 * released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	/* Maps BARs, reads NVRAM config, sets up bp; frees its own
	 * resources on failure, so only the netdev needs freeing here.
	 */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	/* NAPI poll with a weight (budget) of 64 packets per poll. */
	netif_napi_add(dev, &bp->napi, bnx2_poll, 64);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* Copy the 6-byte Ethernet MAC read from NVRAM into both the
	 * active and permanent hardware addresses.
	 */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* Checksum offload and scatter-gather on all chips; IPv6
	 * checksum and TSO6 only on the 5709.
	 */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board() acquired. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
6880
/* PCI remove callback: undo bnx2_init_one() in reverse order.  Pending
 * deferred work is flushed first so no worker can touch the device after
 * it is unregistered and unmapped.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	/* bp lives inside dev's private area, so free_netdev() must come
	 * after the last use of bp above.
	 */
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6899
/* PCI suspend callback: save PCI state, quiesce the interface if it is
 * running, pick a firmware reset code based on Wake-on-LAN capability,
 * reset the chip, free posted buffers and drop into the target power
 * state.  Always returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before touching the hardware. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Tell the firmware whether the link (and WoL) should stay up. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6930
/* PCI resume callback: restore PCI config space and, if the interface
 * was running when suspended, return to D0, reinitialize the NIC from
 * scratch and restart traffic.  Always returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Chip was reset on suspend; full re-init is required. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6947
/* PCI driver glue: device table, probe/remove and power-management hooks. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6956
/* Module entry point: register the PCI driver; probing of matching
 * devices happens from pci_register_driver().
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6961
/* Module exit point: unregister the PCI driver, which invokes
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6966
/* Hook the init/cleanup routines into module load and unload. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6969
6970
6971