/* Source: linux-2.6 tree, drivers/net/bnx2.c
 * (commit 3b907196ca48cf0b92f50c66395bac695a78b587)
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Scratch buffer size used when decompressing/loading firmware (64 KiB). */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "    /* prefix for all printk messages */
#define DRV_MODULE_VERSION      "1.7.6"
#define DRV_MODULE_RELDATE      "May 16, 2008"

/* Convert a relative delay in jiffies to an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Supported board variants.  Values are used as the driver_data field
 * in bnx2_pci_tbl below and index board_info[] for the display name.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* Human-readable adapter names, indexed by board_t above.  Order must
 * stay in sync with the board_t enum.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI IDs this driver binds to.  The HP OEM entries (matched on
 * subvendor/subdevice) must precede the generic PCI_ANY_ID entries for
 * the same device ID so they are matched first.  The final field is the
 * board_t used to look up the adapter name.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* NVRAM device descriptors.  Each entry holds the strap value used to
 * identify the part plus the controller configuration words and geometry
 * (page bits/size, byte address mask, total size) needed to access it.
 * Matched against the hardware strapping at probe time.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
/* Fixed NVRAM descriptor for the 5709 family, which does not use the
 * strap-based flash_table lookup above.
 */
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = txr->tx_prod - txr->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
/* Indirectly write @val to a device register through the PCI config
 * window; indirect_lock serializes the address/data register pair.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
/* Write a 32-bit word at @offset within the firmware shared memory. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
273
274 static u32
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
276 {
277         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
278 }
279
280 static void
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
282 {
283         offset += cid_addr;
284         spin_lock_bh(&bp->indirect_lock);
285         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
286                 int i;
287
288                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291                 for (i = 0; i < 5; i++) {
292                         u32 val;
293                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
295                                 break;
296                         udelay(5);
297                 }
298         } else {
299                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300                 REG_WR(bp, BNX2_CTX_DATA, val);
301         }
302         spin_unlock_bh(&bp->indirect_lock);
303 }
304
/* Read PHY register @reg over MDIO into *@val.
 * Returns 0 on success or -EBUSY if the MDIO transaction does not
 * complete; *val is zeroed on failure.  If hardware auto-polling of the
 * PHY is enabled it is turned off around the manual access and restored
 * afterwards, since both share the MDIO interface.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back to flush */

		udelay(40);
	}

	/* Launch the read transaction: PHY address, register, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for BUSY to clear, then fetch the data bits. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
361
/* Write @val to PHY register @reg over MDIO.
 * Returns 0 on success or -EBUSY if the transaction does not complete.
 * As in bnx2_read_phy(), hardware auto-polling is suspended around the
 * manual MDIO access and re-enabled afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back to flush */

		udelay(40);
	}

	/* Launch the write: PHY address, register, data, WRITE command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
410
411 static void
412 bnx2_disable_int(struct bnx2 *bp)
413 {
414         int i;
415         struct bnx2_napi *bnapi;
416
417         for (i = 0; i < bp->irq_nvecs; i++) {
418                 bnapi = &bp->bnx2_napi[i];
419                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
421         }
422         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
423 }
424
/* Re-enable interrupts on every vector.  For each vector, the first
 * write acknowledges events up to last_status_idx while keeping the
 * interrupt masked; the second write (without MASK_INT) unmasks it.
 * Finally COAL_NOW forces an immediate status block update so any
 * events that arrived while masked are not lost.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
445
/* Disable device interrupts and wait for any in-flight handlers to
 * finish.  intr_sem is raised first so a racing ISR sees the device is
 * being quiesced; bnx2_netif_start() drops it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
456
457 static void
458 bnx2_napi_disable(struct bnx2 *bp)
459 {
460         int i;
461
462         for (i = 0; i < bp->irq_nvecs; i++)
463                 napi_disable(&bp->bnx2_napi[i].napi);
464 }
465
466 static void
467 bnx2_napi_enable(struct bnx2 *bp)
468 {
469         int i;
470
471         for (i = 0; i < bp->irq_nvecs; i++)
472                 napi_enable(&bp->bnx2_napi[i].napi);
473 }
474
/* Quiesce the interface: disable and synchronize interrupts, stop NAPI
 * and the TX queue.  Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
485
486 static void
487 bnx2_netif_start(struct bnx2 *bp)
488 {
489         if (atomic_dec_and_test(&bp->intr_sem)) {
490                 if (netif_running(bp->dev)) {
491                         netif_wake_queue(bp->dev);
492                         bnx2_napi_enable(bp);
493                         bnx2_enable_int(bp);
494                 }
495         }
496 }
497
498 static void
499 bnx2_free_tx_mem(struct bnx2 *bp)
500 {
501         int i;
502
503         for (i = 0; i < bp->num_tx_rings; i++) {
504                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
505                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
506
507                 if (txr->tx_desc_ring) {
508                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
509                                             txr->tx_desc_ring,
510                                             txr->tx_desc_mapping);
511                         txr->tx_desc_ring = NULL;
512                 }
513                 kfree(txr->tx_buf_ring);
514                 txr->tx_buf_ring = NULL;
515         }
516 }
517
518 static void
519 bnx2_free_rx_mem(struct bnx2 *bp)
520 {
521         int i;
522
523         for (i = 0; i < bp->num_rx_rings; i++) {
524                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
525                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
526                 int j;
527
528                 for (j = 0; j < bp->rx_max_ring; j++) {
529                         if (rxr->rx_desc_ring[j])
530                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
531                                                     rxr->rx_desc_ring[j],
532                                                     rxr->rx_desc_mapping[j]);
533                         rxr->rx_desc_ring[j] = NULL;
534                 }
535                 if (rxr->rx_buf_ring)
536                         vfree(rxr->rx_buf_ring);
537                 rxr->rx_buf_ring = NULL;
538
539                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
540                         if (rxr->rx_pg_desc_ring[j])
541                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
542                                                     rxr->rx_pg_desc_ring[i],
543                                                     rxr->rx_pg_desc_mapping[i]);
544                         rxr->rx_pg_desc_ring[i] = NULL;
545                 }
546                 if (rxr->rx_pg_ring)
547                         vfree(rxr->rx_pg_ring);
548                 rxr->rx_pg_ring = NULL;
549         }
550 }
551
552 static int
553 bnx2_alloc_tx_mem(struct bnx2 *bp)
554 {
555         int i;
556
557         for (i = 0; i < bp->num_tx_rings; i++) {
558                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
559                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
560
561                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
562                 if (txr->tx_buf_ring == NULL)
563                         return -ENOMEM;
564
565                 txr->tx_desc_ring =
566                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
567                                              &txr->tx_desc_mapping);
568                 if (txr->tx_desc_ring == NULL)
569                         return -ENOMEM;
570         }
571         return 0;
572 }
573
574 static int
575 bnx2_alloc_rx_mem(struct bnx2 *bp)
576 {
577         int i;
578
579         for (i = 0; i < bp->num_rx_rings; i++) {
580                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
581                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
582                 int j;
583
584                 rxr->rx_buf_ring =
585                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
586                 if (rxr->rx_buf_ring == NULL)
587                         return -ENOMEM;
588
589                 memset(rxr->rx_buf_ring, 0,
590                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
591
592                 for (j = 0; j < bp->rx_max_ring; j++) {
593                         rxr->rx_desc_ring[j] =
594                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
595                                                      &rxr->rx_desc_mapping[j]);
596                         if (rxr->rx_desc_ring[j] == NULL)
597                                 return -ENOMEM;
598
599                 }
600
601                 if (bp->rx_pg_ring_size) {
602                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
603                                                   bp->rx_max_pg_ring);
604                         if (rxr->rx_pg_ring == NULL)
605                                 return -ENOMEM;
606
607                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
608                                bp->rx_max_pg_ring);
609                 }
610
611                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
612                         rxr->rx_pg_desc_ring[j] =
613                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
614                                                 &rxr->rx_pg_desc_mapping[j]);
615                         if (rxr->rx_pg_desc_ring[j] == NULL)
616                                 return -ENOMEM;
617
618                 }
619         }
620         return 0;
621 }
622
/* Release all driver memory: TX/RX rings, 5709 context pages, and the
 * combined status + statistics block.  Safe to call on a partially
 * allocated state (used as the error path of bnx2_alloc_mem).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	/* Context pages exist only on 5709 (ctx_pages is 0 otherwise). */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* The status block and statistics block share one allocation
	 * (see bnx2_alloc_mem), so freeing the status block frees both.
	 */
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
648
/* Allocate all driver memory: the combined status + statistics block,
 * the 5709 context pages, and the TX/RX rings.  Returns 0 on success or
 * -ENOMEM after freeing any partial allocations via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	/* With MSI-X, reserve one aligned per-vector status block slot. */
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block; point its consumer-index
	 * shortcuts into it.
	 */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining MSI-X vectors get their own aligned slice of
		 * the same allocation.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the status block(s) in the same
	 * DMA allocation.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	/* 5709 needs host-resident context memory (8 KiB total). */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
725
/* Report the current link state (speed/duplex/autoneg) to the firmware
 * through the shared memory LINK_STATUS word.  Skipped entirely when
 * the PHY is managed remotely by the firmware itself.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice on purpose: the first read
			 * returns latched values, the second the current
			 * state (standard MII behavior).
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
784
785 static char *
786 bnx2_xceiver_str(struct bnx2 *bp)
787 {
788         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
789                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
790                  "Copper"));
791 }
792
/* Log the link state change, update the carrier flag, and forward the
 * new state to the firmware via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* Message is assembled across several printk calls:
		 * "<dev> NIC <type> Link is Up, <speed> Mbps <duplex>
		 *  [, flow control ...]".
		 */
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
829
/* Resolve the effective flow control setting (bp->flow_ctrl) after a link
 * change.
 *
 * If speed or pause autonegotiation is disabled, the user-requested
 * setting is applied directly (full duplex only; half duplex gets none).
 * Otherwise the result is derived from the local and link-partner pause
 * advertisements per the resolution table in IEEE 802.3 Annex 28B.
 * 1000BASE-X pause bits are first translated into the copper
 * (ADVERTISE_PAUSE_*) encoding so one resolution path handles both media.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Not fully autonegotiated: honor the requested setting. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* The 5708S PHY reports the resolved pause result itself. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Map 1000BASE-X pause bits onto the copper encoding so
		 * the table lookup below works for both.
		 */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
905
906 static int
907 bnx2_5709s_linkup(struct bnx2 *bp)
908 {
909         u32 val, speed;
910
911         bp->link_up = 1;
912
913         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
914         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
915         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
916
917         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
918                 bp->line_speed = bp->req_line_speed;
919                 bp->duplex = bp->req_duplex;
920                 return 0;
921         }
922         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
923         switch (speed) {
924                 case MII_BNX2_GP_TOP_AN_SPEED_10:
925                         bp->line_speed = SPEED_10;
926                         break;
927                 case MII_BNX2_GP_TOP_AN_SPEED_100:
928                         bp->line_speed = SPEED_100;
929                         break;
930                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
931                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
932                         bp->line_speed = SPEED_1000;
933                         break;
934                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
935                         bp->line_speed = SPEED_2500;
936                         break;
937         }
938         if (val & MII_BNX2_GP_TOP_AN_FD)
939                 bp->duplex = DUPLEX_FULL;
940         else
941                 bp->duplex = DUPLEX_HALF;
942         return 0;
943 }
944
945 static int
946 bnx2_5708s_linkup(struct bnx2 *bp)
947 {
948         u32 val;
949
950         bp->link_up = 1;
951         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
952         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
953                 case BCM5708S_1000X_STAT1_SPEED_10:
954                         bp->line_speed = SPEED_10;
955                         break;
956                 case BCM5708S_1000X_STAT1_SPEED_100:
957                         bp->line_speed = SPEED_100;
958                         break;
959                 case BCM5708S_1000X_STAT1_SPEED_1G:
960                         bp->line_speed = SPEED_1000;
961                         break;
962                 case BCM5708S_1000X_STAT1_SPEED_2G5:
963                         bp->line_speed = SPEED_2500;
964                         break;
965         }
966         if (val & BCM5708S_1000X_STAT1_FD)
967                 bp->duplex = DUPLEX_FULL;
968         else
969                 bp->duplex = DUPLEX_HALF;
970
971         return 0;
972 }
973
974 static int
975 bnx2_5706s_linkup(struct bnx2 *bp)
976 {
977         u32 bmcr, local_adv, remote_adv, common;
978
979         bp->link_up = 1;
980         bp->line_speed = SPEED_1000;
981
982         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
983         if (bmcr & BMCR_FULLDPLX) {
984                 bp->duplex = DUPLEX_FULL;
985         }
986         else {
987                 bp->duplex = DUPLEX_HALF;
988         }
989
990         if (!(bmcr & BMCR_ANENABLE)) {
991                 return 0;
992         }
993
994         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
995         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
996
997         common = local_adv & remote_adv;
998         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
999
1000                 if (common & ADVERTISE_1000XFULL) {
1001                         bp->duplex = DUPLEX_FULL;
1002                 }
1003                 else {
1004                         bp->duplex = DUPLEX_HALF;
1005                 }
1006         }
1007
1008         return 0;
1009 }
1010
/* Determine line speed and duplex for a copper PHY that just linked up.
 *
 * With autoneg enabled, the best common ability is picked: 1000BASE-T
 * (CTRL1000/STAT1000) first, then the 10/100 advertisement registers.
 * If nothing overlaps, the link is declared down.  With autoneg disabled,
 * BMCR's forced speed/duplex bits are reported.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The link partner's 1000BASE-T ability bits in STAT1000
		 * sit two bit positions above our CTRL1000 advertisement
		 * bits (LPA_1000FULL == ADVERTISE_1000FULL << 2), hence
		 * the shift before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit overlap: fall back to 10/100. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all: report down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: use BMCR's forced settings. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1076
/* Program the RX L2 context type word for the given context id.  On the
 * 5709 this also encodes the pause (flow control) watermarks, derived
 * from the RX ring size.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8; /* NOTE(review): magic field at bits 8+; meaning not visible here */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Only arm the low watermark (pause generation) when TX
		 * flow control is actually enabled.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Marks must be ordered lo < hi; otherwise drop lo. */
		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale both marks into the units the context expects. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field (clamped to 0xf below); if it
		 * scaled down to zero, disable the low mark too.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1112
1113 static void
1114 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1115 {
1116         int i;
1117         u32 cid;
1118
1119         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1120                 if (i == 1)
1121                         cid = RX_RSS_CID;
1122                 bnx2_init_rx_context(bp, cid);
1123         }
1124 }
1125
/* Program the EMAC to match the current link parameters: inter-frame
 * gap/slot time, port mode (MII/GMII/2.5G), duplex, and RX/TX pause
 * enables.  Called after the PHY link state has been resolved.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; 1000 Mbps half duplex needs a larger value
	 * (0x26ff vs 0x2620 — exact field meanings not visible here).
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; use MII. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 RX contexts encode flow-control watermarks; refresh them. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);

	return 0;
}
1195
1196 static void
1197 bnx2_enable_bmsr1(struct bnx2 *bp)
1198 {
1199         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1200             (CHIP_NUM(bp) == CHIP_NUM_5709))
1201                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1202                                MII_BNX2_BLK_ADDR_GP_STATUS);
1203 }
1204
1205 static void
1206 bnx2_disable_bmsr1(struct bnx2 *bp)
1207 {
1208         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1209             (CHIP_NUM(bp) == CHIP_NUM_5709))
1210                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1211                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1212 }
1213
1214 static int
1215 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1216 {
1217         u32 up1;
1218         int ret = 1;
1219
1220         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1221                 return 0;
1222
1223         if (bp->autoneg & AUTONEG_SPEED)
1224                 bp->advertising |= ADVERTISED_2500baseX_Full;
1225
1226         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1227                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1228
1229         bnx2_read_phy(bp, bp->mii_up1, &up1);
1230         if (!(up1 & BCM5708S_UP1_2G5)) {
1231                 up1 |= BCM5708S_UP1_2G5;
1232                 bnx2_write_phy(bp, bp->mii_up1, up1);
1233                 ret = 0;
1234         }
1235
1236         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1237                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1238                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1239
1240         return ret;
1241 }
1242
1243 static int
1244 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1245 {
1246         u32 up1;
1247         int ret = 0;
1248
1249         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1250                 return 0;
1251
1252         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1253                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1254
1255         bnx2_read_phy(bp, bp->mii_up1, &up1);
1256         if (up1 & BCM5708S_UP1_2G5) {
1257                 up1 &= ~BCM5708S_UP1_2G5;
1258                 bnx2_write_phy(bp, bp->mii_up1, up1);
1259                 ret = 1;
1260         }
1261
1262         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1263                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1264                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1265
1266         return ret;
1267 }
1268
1269 static void
1270 bnx2_enable_forced_2g5(struct bnx2 *bp)
1271 {
1272         u32 bmcr;
1273
1274         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1275                 return;
1276
1277         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1278                 u32 val;
1279
1280                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1281                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1282                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1283                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1284                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1285                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1286
1287                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1288                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1289                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1290
1291         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1292                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1293                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1294         }
1295
1296         if (bp->autoneg & AUTONEG_SPEED) {
1297                 bmcr &= ~BMCR_ANENABLE;
1298                 if (bp->req_duplex == DUPLEX_FULL)
1299                         bmcr |= BMCR_FULLDPLX;
1300         }
1301         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1302 }
1303
1304 static void
1305 bnx2_disable_forced_2g5(struct bnx2 *bp)
1306 {
1307         u32 bmcr;
1308
1309         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1310                 return;
1311
1312         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1313                 u32 val;
1314
1315                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1316                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1317                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1318                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1319                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1320
1321                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1323                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1324
1325         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1326                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1327                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1328         }
1329
1330         if (bp->autoneg & AUTONEG_SPEED)
1331                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1332         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1333 }
1334
1335 static void
1336 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1337 {
1338         u32 val;
1339
1340         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1341         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1342         if (start)
1343                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1344         else
1345                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1346 }
1347
/* Poll the PHY, update bp->link_up, and reprogram the MAC to match.
 *
 * In loopback modes the link is simply declared up; with a remote
 * (firmware-managed) PHY this function does nothing.  Otherwise the PHY
 * status is read, per-chip link-up handlers resolve speed/duplex, flow
 * control is resolved, a state change is logged, and the EMAC is
 * reprogrammed.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote PHY: link management is handled by the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down events; read it twice so the second
	 * read reflects the current state (standard MII behavior).
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down link before sampling. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is also read twice (latched), via the shadow
		 * register select.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Override BMSR with the MAC's view: link is up only if
		 * the EMAC sees link and the PCS is in sync.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode so autoneg can
		 * run, and leave parallel-detect mode if we were in it.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1431
1432 static int
1433 bnx2_reset_phy(struct bnx2 *bp)
1434 {
1435         int i;
1436         u32 reg;
1437
1438         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1439
1440 #define PHY_RESET_MAX_WAIT 100
1441         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1442                 udelay(10);
1443
1444                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1445                 if (!(reg & BMCR_RESET)) {
1446                         udelay(20);
1447                         break;
1448                 }
1449         }
1450         if (i == PHY_RESET_MAX_WAIT) {
1451                 return -EBUSY;
1452         }
1453         return 0;
1454 }
1455
1456 static u32
1457 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1458 {
1459         u32 adv = 0;
1460
1461         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1462                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1463
1464                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1465                         adv = ADVERTISE_1000XPAUSE;
1466                 }
1467                 else {
1468                         adv = ADVERTISE_PAUSE_CAP;
1469                 }
1470         }
1471         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1472                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1473                         adv = ADVERTISE_1000XPSE_ASYM;
1474                 }
1475                 else {
1476                         adv = ADVERTISE_PAUSE_ASYM;
1477                 }
1478         }
1479         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1480                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1481                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1482                 }
1483                 else {
1484                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1485                 }
1486         }
1487         return adv;
1488 }
1489
1490 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1491
/* Configure a firmware-managed (remote) PHY by encoding the requested
 * link settings into a netlink-style argument word and handing it to the
 * bootcode via the shared-memory mailbox.  Always returns 0.
 *
 * NOTE(review): the spin_unlock_bh/lock_bh pair implies the caller holds
 * bp->phy_lock and that bnx2_fw_sync() may sleep — confirm at call sites.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: translate each advertised mode individually. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map pause advertisement bits (either medium) to the FC flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop phy_lock while waiting for the firmware to process the
	 * command.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1548
/* Configure the SerDes PHY according to the requested settings.
 *
 * Remote PHYs are delegated to bnx2_setup_remote_phy().  In forced mode
 * the speed/duplex are programmed directly into BMCR (with chip-specific
 * 2.5G force handling) and the link is bounced if anything changed so
 * the partner notices.  In autoneg mode the advertisement is rewritten
 * and autoneg restarted only when it actually differs, with a short
 * timer armed to catch non-negotiating partners.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching into or out of 2.5G requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific 2.5G force handling. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 is BMCR_SPEED1000's
				 * raw value; cleared here for the 5709 —
				 * intent not documented in SOURCE.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just refresh MAC-side state. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1663
/* Ethtool advertisement mask for all supported fibre speeds.  NOTE:
 * this macro reads phy_flags through a variable named 'bp' that must
 * be in scope at the expansion site; 2.5G is advertised only when the
 * PHY is 2.5G capable.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?                  \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* Ethtool advertisement mask for all supported copper speeds. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register bits for every 10/100 mode (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1000 Mbps modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1678
1679 static void
1680 bnx2_set_default_remote_link(struct bnx2 *bp)
1681 {
1682         u32 link;
1683
1684         if (bp->phy_port == PORT_TP)
1685                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1686         else
1687                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1688
1689         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1690                 bp->req_line_speed = 0;
1691                 bp->autoneg |= AUTONEG_SPEED;
1692                 bp->advertising = ADVERTISED_Autoneg;
1693                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1694                         bp->advertising |= ADVERTISED_10baseT_Half;
1695                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1696                         bp->advertising |= ADVERTISED_10baseT_Full;
1697                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1698                         bp->advertising |= ADVERTISED_100baseT_Half;
1699                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1700                         bp->advertising |= ADVERTISED_100baseT_Full;
1701                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1702                         bp->advertising |= ADVERTISED_1000baseT_Full;
1703                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1704                         bp->advertising |= ADVERTISED_2500baseX_Full;
1705         } else {
1706                 bp->autoneg = 0;
1707                 bp->advertising = 0;
1708                 bp->req_duplex = DUPLEX_FULL;
1709                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1710                         bp->req_line_speed = SPEED_10;
1711                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1712                                 bp->req_duplex = DUPLEX_HALF;
1713                 }
1714                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1715                         bp->req_line_speed = SPEED_100;
1716                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1717                                 bp->req_duplex = DUPLEX_HALF;
1718                 }
1719                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1720                         bp->req_line_speed = SPEED_1000;
1721                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1722                         bp->req_line_speed = SPEED_2500;
1723         }
1724 }
1725
1726 static void
1727 bnx2_set_default_link(struct bnx2 *bp)
1728 {
1729         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1730                 bnx2_set_default_remote_link(bp);
1731                 return;
1732         }
1733
1734         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1735         bp->req_line_speed = 0;
1736         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1737                 u32 reg;
1738
1739                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1740
1741                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1742                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1743                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1744                         bp->autoneg = 0;
1745                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1746                         bp->req_duplex = DUPLEX_FULL;
1747                 }
1748         } else
1749                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1750 }
1751
/* Answer the bootcode heartbeat: advance the driver pulse sequence
 * number and post it to the pulse mailbox in shared memory.  The write
 * goes through the PCI register window directly (guarded by
 * indirect_lock) rather than via bnx2_shmem_wr().
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1765
/* Handle a link event reported by the remote-PHY firmware.  Reads the
 * link status word from shared memory, updates link_up, line_speed,
 * duplex, flow_ctrl and phy_port accordingly, reports any link change,
 * and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, for change reporting */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* The firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The half-duplex cases deliberately fall through to the
		 * same-speed full-duplex case after overriding bp->duplex.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* With full autoneg the firmware reports the negotiated
		 * pause settings; otherwise fall back to the requested
		 * flow control (full duplex only). */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change invalidates the default link config. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1842
1843 static int
1844 bnx2_set_remote_link(struct bnx2 *bp)
1845 {
1846         u32 evt_code;
1847
1848         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1849         switch (evt_code) {
1850                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1851                         bnx2_remote_phy_event(bp);
1852                         break;
1853                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1854                 default:
1855                         bnx2_send_heart_beat(bp);
1856                         break;
1857         }
1858         return 0;
1859 }
1860
/* Program a copper PHY from bp->autoneg / bp->req_* settings.  Called
 * with phy_lock held (it is temporarily dropped around the forced
 * link-down delay below).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 advertisement, masked to the bits we manage. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		/* Current 1000BASE-T advertisement. */
		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when the advertisement actually
		 * changed or autoneg was previously disabled. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1957
1958 static int
1959 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1960 {
1961         if (bp->loopback == MAC_LOOPBACK)
1962                 return 0;
1963
1964         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1965                 return (bnx2_setup_serdes_phy(bp, port));
1966         }
1967         else {
1968                 return (bnx2_setup_copper_phy(bp));
1969         }
1970 }
1971
/* Initialize the 5709 integrated SerDes PHY: select the AN MMD through
 * the AER block, force fiber mode, set the 2.5G advertisement from
 * phy_flags, and enable BAM/T2 next-page and CL73 BAM features.
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* The 5709 SerDes IEEE registers sit at an offset of 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Disable media auto-detect and force 1000X (fiber) mode. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is 2.5G capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page control. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	/* CL73 BAM: station manager and next-page after base page. */
	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE bank. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2021
/* Initialize the BCM5708S SerDes PHY: map IEEE registers, enable fiber
 * mode with auto-detect and PLL early-detect, optionally advertise
 * 2.5G, apply the TX-amplitude tweak on early chip revisions, and
 * program NVRAM-configured TX control on backplane designs.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use the IEEE register layout via the digital-3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with signal auto-detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable PLL early link detection. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply any TX control value configured in NVRAM, but only on
	 * backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2079
/* Initialize the 5706 integrated SerDes PHY.  Programs extended packet
 * length and related shadow registers differently for jumbo (MTU >
 * 1500) vs. standard frames.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 is an undocumented MISC_GP_HW_CTL0 value
	 * applied only on the 5706; its meaning is not visible here. */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2117
/* Initialize a copper PHY: apply the CRC-fix and early-DAC workarounds
 * when flagged, set/clear the extended packet length bit based on MTU,
 * and enable ethernet@wirespeed.  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Opaque shadow/expansion register sequence for the CRC
		 * workaround; the individual values are not documented
		 * here. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expand register 8 to disable the
		 * early DAC. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2169
2170
/* Initialize the PHY (or skip probing for a firmware-managed remote
 * PHY) and then apply the current link settings.  The MII register map
 * defaults to the standard IEEE offsets; chip-specific init routines
 * override them as needed.  Returns 0 or an error from the init/setup
 * path.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Standard IEEE MII register offsets by default. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote (firmware-managed) PHY: no local PHY to probe. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2214
2215 static int
2216 bnx2_set_mac_loopback(struct bnx2 *bp)
2217 {
2218         u32 mac_mode;
2219
2220         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2221         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2222         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2223         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2224         bp->link_up = 1;
2225         return 0;
2226 }
2227
2228 static int bnx2_test_link(struct bnx2 *);
2229
/* Put the PHY into loopback at 1000 Mbps full duplex and set the EMAC
 * to GMII with the loopback/force/duplex/25G mode bits cleared.  Polls
 * up to ~1 second for the loopback link to come up (gives up silently).
 * Returns 0, or the error from the PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait for the loopback link; 10 tries of 100 ms each. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	/* Select GMII; clear loopback-related EMAC mode bits. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2259
/* Synchronous firmware command: post msg_data (tagged with a rolling
 * sequence number) to the driver mailbox and poll for the bootcode to
 * echo the sequence in the firmware mailbox.
 *
 * Returns 0 on success (unconditionally for WAIT0-class messages,
 * which skip the ack/status checks), -EBUSY if the firmware never
 * acked (the timeout is also reported back to the firmware), or -EIO
 * if the firmware acked with a non-OK status.  'silent' suppresses
 * the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require an ack or status check. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2302
/* Initialize the 5709 context memory: trigger the on-chip MEM_INIT,
 * wait for it to complete, then load the host page table with the DMA
 * addresses of the driver's (zeroed) context pages, waiting for each
 * table write to be consumed.
 *
 * Returns 0 on success, -EBUSY if the hardware stops responding, or
 * -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* NOTE(review): bit 12 has no named constant here; its purpose
	 * is not visible in this file. */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll until the hardware clears MEM_INIT. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit page address into the page table entry. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the write request is consumed. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2350
/* Zero the on-chip context for all 96 CIDs (5706/5708-style context
 * interface).  On 5706 A0, VCIDs with bit 3 set are remapped into a
 * 0x60-based physical CID range before computing the physical address.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context covers CTX_SIZE / PHY_CTX_SIZE physical
		 * pages; the addresses are advanced cumulatively each
		 * pass (+= i << shift). */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			/* Map the page, then clear it one word at a time. */
			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2393
2394 static int
2395 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2396 {
2397         u16 *good_mbuf;
2398         u32 good_mbuf_cnt;
2399         u32 val;
2400
2401         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2402         if (good_mbuf == NULL) {
2403                 printk(KERN_ERR PFX "Failed to allocate memory in "
2404                                     "bnx2_alloc_bad_rbuf\n");
2405                 return -ENOMEM;
2406         }
2407
2408         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2409                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2410
2411         good_mbuf_cnt = 0;
2412
2413         /* Allocate a bunch of mbufs and save the good ones in an array. */
2414         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2415         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2416                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2417                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2418
2419                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2420
2421                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2422
2423                 /* The addresses with Bit 9 set are bad memory blocks. */
2424                 if (!(val & (1 << 9))) {
2425                         good_mbuf[good_mbuf_cnt] = (u16) val;
2426                         good_mbuf_cnt++;
2427                 }
2428
2429                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2430         }
2431
2432         /* Free the good ones back to the mbuf pool thus discarding
2433          * all the bad ones. */
2434         while (good_mbuf_cnt) {
2435                 good_mbuf_cnt--;
2436
2437                 val = good_mbuf[good_mbuf_cnt];
2438                 val = (val << 9) | val | 1;
2439
2440                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2441         }
2442         kfree(good_mbuf);
2443         return 0;
2444 }
2445
2446 static void
2447 bnx2_set_mac_addr(struct bnx2 *bp)
2448 {
2449         u32 val;
2450         u8 *mac_addr = bp->dev->dev_addr;
2451
2452         val = (mac_addr[0] << 8) | mac_addr[1];
2453
2454         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2455
2456         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2457                 (mac_addr[4] << 8) | mac_addr[5];
2458
2459         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2460 }
2461
/* Attach a freshly allocated page to slot 'index' of the RX page ring
 * and publish its DMA address in the matching descriptor.
 * Returns 0, or -ENOMEM if no page could be allocated.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	/* NOTE(review): the mapping result is not checked with
	 * pci_dma_mapping_error(); confirm whether mapping failures are
	 * possible on the supported platforms. */
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Split the 64-bit DMA address across the two descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2481
2482 static void
2483 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2484 {
2485         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2486         struct page *page = rx_pg->page;
2487
2488         if (!page)
2489                 return;
2490
2491         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2492                        PCI_DMA_FROMDEVICE);
2493
2494         __free_page(page);
2495         rx_pg->page = NULL;
2496 }
2497
2498 static inline int
2499 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2500 {
2501         struct sk_buff *skb;
2502         struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2503         dma_addr_t mapping;
2504         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2505         unsigned long align;
2506
2507         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2508         if (skb == NULL) {
2509                 return -ENOMEM;
2510         }
2511
2512         if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2513                 skb_reserve(skb, BNX2_RX_ALIGN - align);
2514
2515         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2516                 PCI_DMA_FROMDEVICE);
2517
2518         rx_buf->skb = skb;
2519         pci_unmap_addr_set(rx_buf, mapping, mapping);
2520
2521         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2522         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2523
2524         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2525
2526         return 0;
2527 }
2528
2529 static int
2530 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2531 {
2532         struct status_block *sblk = bnapi->status_blk.msi;
2533         u32 new_link_state, old_link_state;
2534         int is_set = 1;
2535
2536         new_link_state = sblk->status_attn_bits & event;
2537         old_link_state = sblk->status_attn_bits_ack & event;
2538         if (new_link_state != old_link_state) {
2539                 if (new_link_state)
2540                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2541                 else
2542                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2543         } else
2544                 is_set = 0;
2545
2546         return is_set;
2547 }
2548
2549 static void
2550 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2551 {
2552         spin_lock(&bp->phy_lock);
2553
2554         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2555                 bnx2_set_link(bp);
2556         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2557                 bnx2_set_remote_link(bp);
2558
2559         spin_unlock(&bp->phy_lock);
2560
2561 }
2562
2563 static inline u16
2564 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2565 {
2566         u16 cons;
2567
2568         /* Tell compiler that status block fields can change. */
2569         barrier();
2570         cons = *bnapi->hw_tx_cons_ptr;
2571         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2572                 cons++;
2573         return cons;
2574 }
2575
/* Reclaim completed tx buffers up to the hardware consumer index,
 * unmapping DMA and freeing skbs.  Returns the number of completed
 * packets, at most @budget.  Wakes the tx queue if it was stopped and
 * enough descriptors are now free.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Index of the last BD of this packet (head BD
			 * plus one BD per fragment).
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the link entry at the end of a
			 * ring page, which the packet's BDs skip over.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap: stop
			 * if the packet's last BD is not yet completed.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment's BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up any completions that arrived meanwhile. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under netif_tx_lock to avoid racing with the xmit
	 * path when waking the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
	return tx_pkt;
}
2658
/* Recycle @count page-ring entries from the consumer side back to the
 * producer side without allocating new pages.  If @skb is non-NULL, its
 * last page fragment is detached, remapped for DMA, parked in the first
 * consumer slot, and the skb itself is freed.  Used on error/abort paths.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = rxr->rx_pg_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Strip the last fragment off the skb and stash
			 * it in the consumer slot so it can be recycled.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page and its DMA mapping from the
			 * consumer slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2708
/* Recycle an rx buffer: move @skb and its DMA mapping from consumer slot
 * @cons back to producer slot @prod so the hardware can refill it.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header region back to the device before reposting. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the descriptor already points at this buffer. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2738
/* Finish receiving one packet into @skb.  For split-header/jumbo frames
 * (@hdr_len != 0), attach the remaining bytes from the page ring as skb
 * fragments and trim the trailing 4-byte frame CRC.  @ring_idx packs the
 * consumer index in the high 16 bits and the producer index in the low
 * 16 bits.  On failure the buffers are recycled and -errno is returned.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the producer slot before consuming this buffer. */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* raw_len restores the 4-byte CRC the caller
			 * already subtracted from @len.
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Whole frame fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Bytes beyond the header live in the page ring; the +4
		 * re-includes the CRC, trimmed again from the last page.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only CRC bytes remain: recycle the
				 * unused pages and trim the tail already
				 * attached to the skb.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Trim the CRC from the final fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			/* Replenish the page ring as we consume it. */
			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2831
2832 static inline u16
2833 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2834 {
2835         u16 cons;
2836
2837         /* Tell compiler that status block fields can change. */
2838         barrier();
2839         cons = *bnapi->hw_rx_cons_ptr;
2840         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2841                 cons++;
2842         return cons;
2843 }
2844
2845 static int
2846 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2847 {
2848         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2849         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2850         struct l2_fhdr *rx_hdr;
2851         int rx_pkt = 0, pg_ring_used = 0;
2852
2853         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2854         sw_cons = rxr->rx_cons;
2855         sw_prod = rxr->rx_prod;
2856
2857         /* Memory barrier necessary as speculative reads of the rx
2858          * buffer can be ahead of the index in the status block
2859          */
2860         rmb();
2861         while (sw_cons != hw_cons) {
2862                 unsigned int len, hdr_len;
2863                 u32 status;
2864                 struct sw_bd *rx_buf;
2865                 struct sk_buff *skb;
2866                 dma_addr_t dma_addr;
2867
2868                 sw_ring_cons = RX_RING_IDX(sw_cons);
2869                 sw_ring_prod = RX_RING_IDX(sw_prod);
2870
2871                 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2872                 skb = rx_buf->skb;
2873
2874                 rx_buf->skb = NULL;
2875
2876                 dma_addr = pci_unmap_addr(rx_buf, mapping);
2877
2878                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2879                         BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2880                         PCI_DMA_FROMDEVICE);
2881
2882                 rx_hdr = (struct l2_fhdr *) skb->data;
2883                 len = rx_hdr->l2_fhdr_pkt_len;
2884
2885                 if ((status = rx_hdr->l2_fhdr_status) &
2886                         (L2_FHDR_ERRORS_BAD_CRC |
2887                         L2_FHDR_ERRORS_PHY_DECODE |
2888                         L2_FHDR_ERRORS_ALIGNMENT |
2889                         L2_FHDR_ERRORS_TOO_SHORT |
2890                         L2_FHDR_ERRORS_GIANT_FRAME)) {
2891
2892                         bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2893                                           sw_ring_prod);
2894                         goto next_rx;
2895                 }
2896                 hdr_len = 0;
2897                 if (status & L2_FHDR_STATUS_SPLIT) {
2898                         hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2899                         pg_ring_used = 1;
2900                 } else if (len > bp->rx_jumbo_thresh) {
2901                         hdr_len = bp->rx_jumbo_thresh;
2902                         pg_ring_used = 1;
2903                 }
2904
2905                 len -= 4;
2906
2907                 if (len <= bp->rx_copy_thresh) {
2908                         struct sk_buff *new_skb;
2909
2910                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
2911                         if (new_skb == NULL) {
2912                                 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2913                                                   sw_ring_prod);
2914                                 goto next_rx;
2915                         }
2916
2917                         /* aligned copy */
2918                         skb_copy_from_linear_data_offset(skb,
2919                                                          BNX2_RX_OFFSET - 2,
2920                                       new_skb->data, len + 2);
2921                         skb_reserve(new_skb, 2);
2922                         skb_put(new_skb, len);
2923
2924                         bnx2_reuse_rx_skb(bp, rxr, skb,
2925                                 sw_ring_cons, sw_ring_prod);
2926
2927                         skb = new_skb;
2928                 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2929                            dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2930                         goto next_rx;
2931
2932                 skb->protocol = eth_type_trans(skb, bp->dev);
2933
2934                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2935                         (ntohs(skb->protocol) != 0x8100)) {
2936
2937                         dev_kfree_skb(skb);
2938                         goto next_rx;
2939
2940                 }
2941
2942                 skb->ip_summed = CHECKSUM_NONE;
2943                 if (bp->rx_csum &&
2944                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2945                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
2946
2947                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2948                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2949                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2950                 }
2951
2952 #ifdef BCM_VLAN
2953                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2954                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2955                                 rx_hdr->l2_fhdr_vlan_tag);
2956                 }
2957                 else
2958 #endif
2959                         netif_receive_skb(skb);
2960
2961                 bp->dev->last_rx = jiffies;
2962                 rx_pkt++;
2963
2964 next_rx:
2965                 sw_cons = NEXT_RX_BD(sw_cons);
2966                 sw_prod = NEXT_RX_BD(sw_prod);
2967
2968                 if ((rx_pkt == budget))
2969                         break;
2970
2971                 /* Refresh hw_cons to see if there is new work */
2972                 if (sw_cons == hw_cons) {
2973                         hw_cons = bnx2_get_hw_rx_cons(bnapi);
2974                         rmb();
2975                 }
2976         }
2977         rxr->rx_cons = sw_cons;
2978         rxr->rx_prod = sw_prod;
2979
2980         if (pg_ring_used)
2981                 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
2982
2983         REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
2984
2985         REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
2986
2987         mmiowb();
2988
2989         return rx_pkt;
2990
2991 }
2992
2993 /* MSI ISR - The only difference between this and the INTx ISR
2994  * is that the MSI interrupt is always serviced.
2995  */
2996 static irqreturn_t
2997 bnx2_msi(int irq, void *dev_instance)
2998 {
2999         struct bnx2_napi *bnapi = dev_instance;
3000         struct bnx2 *bp = bnapi->bp;
3001         struct net_device *dev = bp->dev;
3002
3003         prefetch(bnapi->status_blk.msi);
3004         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3005                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3006                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3007
3008         /* Return here if interrupt is disabled. */
3009         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3010                 return IRQ_HANDLED;
3011
3012         netif_rx_schedule(dev, &bnapi->napi);
3013
3014         return IRQ_HANDLED;
3015 }
3016
3017 static irqreturn_t
3018 bnx2_msi_1shot(int irq, void *dev_instance)
3019 {
3020         struct bnx2_napi *bnapi = dev_instance;
3021         struct bnx2 *bp = bnapi->bp;
3022         struct net_device *dev = bp->dev;
3023
3024         prefetch(bnapi->status_blk.msi);
3025
3026         /* Return here if interrupt is disabled. */
3027         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3028                 return IRQ_HANDLED;
3029
3030         netif_rx_schedule(dev, &bnapi->napi);
3031
3032         return IRQ_HANDLED;
3033 }
3034
/* INTx interrupt handler.  Safe on shared IRQ lines: returns IRQ_NONE
 * when this device did not raise the interrupt.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct net_device *dev = bp->dev;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
3074
3075 static inline int
3076 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3077 {
3078         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3079         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3080
3081         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3082             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3083                 return 1;
3084         return 0;
3085 }
3086
3087 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3088                                  STATUS_ATTN_BITS_TIMER_ABORT)
3089
3090 static inline int
3091 bnx2_has_work(struct bnx2_napi *bnapi)
3092 {
3093         struct status_block *sblk = bnapi->status_blk.msi;
3094
3095         if (bnx2_has_fast_work(bnapi))
3096                 return 1;
3097
3098         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3099             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3100                 return 1;
3101
3102         return 0;
3103 }
3104
3105 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3106 {
3107         struct status_block *sblk = bnapi->status_blk.msi;
3108         u32 status_attn_bits = sblk->status_attn_bits;
3109         u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3110
3111         if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3112             (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3113
3114                 bnx2_phy_int(bp, bnapi);
3115
3116                 /* This is needed to take care of transient status
3117                  * during link changes.
3118                  */
3119                 REG_WR(bp, BNX2_HC_COMMAND,
3120                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3121                 REG_RD(bp, BNX2_HC_COMMAND);
3122         }
3123 }
3124
3125 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3126                           int work_done, int budget)
3127 {
3128         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3129         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3130
3131         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3132                 bnx2_tx_int(bp, bnapi, 0);
3133
3134         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3135                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3136
3137         return work_done;
3138 }
3139
/* NAPI poll handler for MSI-X vectors: services rx/tx fast-path work
 * only (link attention is handled by the base vector's handler).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Idle: leave polling mode, acknowledge the last
			 * processed status index and re-enable this
			 * vector's interrupt.
			 */
			netif_rx_complete(bp->dev, napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3166
/* Main NAPI poll handler (INTx/MSI): handles link attention events plus
 * rx/tx fast-path work, then re-enables interrupts when idle.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI/MSI-X: one write acks the status index and
			 * re-enables interrupts.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: write once with the mask bit still set,
			 * then again without it to re-enable interrupts.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3210
3211 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3212  * from set_multicast.
3213  */
3214 static void
3215 bnx2_set_rx_mode(struct net_device *dev)
3216 {
3217         struct bnx2 *bp = netdev_priv(dev);
3218         u32 rx_mode, sort_mode;
3219         int i;
3220
3221         spin_lock_bh(&bp->phy_lock);
3222
3223         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3224                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3225         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3226 #ifdef BCM_VLAN
3227         if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3228                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3229 #else
3230         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3231                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3232 #endif
3233         if (dev->flags & IFF_PROMISC) {
3234                 /* Promiscuous mode. */
3235                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3236                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3237                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3238         }
3239         else if (dev->flags & IFF_ALLMULTI) {
3240                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3241                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3242                                0xffffffff);
3243                 }
3244                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3245         }
3246         else {
3247                 /* Accept one or more multicast(s). */
3248                 struct dev_mc_list *mclist;
3249                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3250                 u32 regidx;
3251                 u32 bit;
3252                 u32 crc;
3253
3254                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3255
3256                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3257                      i++, mclist = mclist->next) {
3258
3259                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3260                         bit = crc & 0xff;
3261                         regidx = (bit & 0xe0) >> 5;
3262                         bit &= 0x1f;
3263                         mc_filter[regidx] |= (1 << bit);
3264                 }
3265
3266                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3267                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3268                                mc_filter[i]);
3269                 }
3270
3271                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3272         }
3273
3274         if (rx_mode != bp->rx_mode) {
3275                 bp->rx_mode = rx_mode;
3276                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3277         }
3278
3279         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3280         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3281         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3282
3283         spin_unlock_bh(&bp->phy_lock);
3284 }
3285
3286 static void
3287 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3288         u32 rv2p_proc)
3289 {
3290         int i;
3291         u32 val;
3292
3293         if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3294                 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3295                 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3296                 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3297                 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3298         }
3299
3300         for (i = 0; i < rv2p_code_len; i += 8) {
3301                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3302                 rv2p_code++;
3303                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3304                 rv2p_code++;
3305
3306                 if (rv2p_proc == RV2P_PROC1) {
3307                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3308                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3309                 }
3310                 else {
3311                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3312                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3313                 }
3314         }
3315
3316         /* Reset the processor, un-stall is done later. */
3317         if (rv2p_proc == RV2P_PROC1) {
3318                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3319         }
3320         else {
3321                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3322         }
3323 }
3324
3325 static int
3326 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3327 {
3328         u32 offset;
3329         u32 val;
3330         int rc;
3331
3332         /* Halt the CPU. */
3333         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3334         val |= cpu_reg->mode_value_halt;
3335         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3336         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3337
3338         /* Load the Text area. */
3339         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3340         if (fw->gz_text) {
3341                 int j;
3342
3343                 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3344                                        fw->gz_text_len);
3345                 if (rc < 0)
3346                         return rc;
3347
3348                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3349                         bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3350                 }
3351         }
3352
3353         /* Load the Data area. */
3354         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3355         if (fw->data) {
3356                 int j;
3357
3358                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3359                         bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3360                 }
3361         }
3362
3363         /* Load the SBSS area. */
3364         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3365         if (fw->sbss_len) {
3366                 int j;
3367
3368                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3369                         bnx2_reg_wr_ind(bp, offset, 0);
3370                 }
3371         }
3372
3373         /* Load the BSS area. */
3374         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3375         if (fw->bss_len) {
3376                 int j;
3377
3378                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3379                         bnx2_reg_wr_ind(bp, offset, 0);
3380                 }
3381         }
3382
3383         /* Load the Read-Only area. */
3384         offset = cpu_reg->spad_base +
3385                 (fw->rodata_addr - cpu_reg->mips_view_base);
3386         if (fw->rodata) {
3387                 int j;
3388
3389                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3390                         bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3391                 }
3392         }
3393
3394         /* Clear the pre-fetch instruction. */
3395         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3396         bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3397
3398         /* Start the CPU. */
3399         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3400         val &= ~cpu_reg->mode_value_halt;
3401         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3402         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3403
3404         return 0;
3405 }
3406
3407 static int
3408 bnx2_init_cpus(struct bnx2 *bp)
3409 {
3410         struct fw_info *fw;
3411         int rc, rv2p_len;
3412         void *text, *rv2p;
3413
3414         /* Initialize the RV2P processor. */
3415         text = vmalloc(FW_BUF_SIZE);
3416         if (!text)
3417                 return -ENOMEM;
3418         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3419                 rv2p = bnx2_xi_rv2p_proc1;
3420                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3421         } else {
3422                 rv2p = bnx2_rv2p_proc1;
3423                 rv2p_len = sizeof(bnx2_rv2p_proc1);
3424         }
3425         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3426         if (rc < 0)
3427                 goto init_cpu_err;
3428
3429         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3430
3431         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3432                 rv2p = bnx2_xi_rv2p_proc2;
3433                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3434         } else {
3435                 rv2p = bnx2_rv2p_proc2;
3436                 rv2p_len = sizeof(bnx2_rv2p_proc2);
3437         }
3438         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3439         if (rc < 0)
3440                 goto init_cpu_err;
3441
3442         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3443
3444         /* Initialize the RX Processor. */
3445         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3446                 fw = &bnx2_rxp_fw_09;
3447         else
3448                 fw = &bnx2_rxp_fw_06;
3449
3450         fw->text = text;
3451         rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3452         if (rc)
3453                 goto init_cpu_err;
3454
3455         /* Initialize the TX Processor. */
3456         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3457                 fw = &bnx2_txp_fw_09;
3458         else
3459                 fw = &bnx2_txp_fw_06;
3460
3461         fw->text = text;
3462         rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3463         if (rc)
3464                 goto init_cpu_err;
3465
3466         /* Initialize the TX Patch-up Processor. */
3467         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3468                 fw = &bnx2_tpat_fw_09;
3469         else
3470                 fw = &bnx2_tpat_fw_06;
3471
3472         fw->text = text;
3473         rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3474         if (rc)
3475                 goto init_cpu_err;
3476
3477         /* Initialize the Completion Processor. */
3478         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3479                 fw = &bnx2_com_fw_09;
3480         else
3481                 fw = &bnx2_com_fw_06;
3482
3483         fw->text = text;
3484         rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3485         if (rc)
3486                 goto init_cpu_err;
3487
3488         /* Initialize the Command Processor. */
3489         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3490                 fw = &bnx2_cp_fw_09;
3491         else
3492                 fw = &bnx2_cp_fw_06;
3493
3494         fw->text = text;
3495         rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3496
3497 init_cpu_err:
3498         vfree(text);
3499         return rc;
3500 }
3501
/* Move the chip between PCI power states via the PM capability's PMCSR
 * register.  Only PCI_D0 and PCI_D3hot are supported; any other state
 * returns -EINVAL.
 *
 * D0: clear PME status, wait out the D3hot->D0 transition delay, and
 * disable the magic-packet / ACPI wake-up logic.
 * D3hot: when WOL is enabled, reprogram the MAC for magic-packet and
 * broadcast/multicast reception, notify the bootcode, then write the
 * D3hot state (and PME enable) into PMCSR.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set the power-state field to D0 and clear any pending
		 * PME status (write-one-to-clear per the PCI PM spec). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Acknowledge any received wake packets and turn off
		 * magic-packet mode now that we are awake. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate copper links down to
			 * 10/100 for the low-power state, then restore the
			 * user's autoneg/advertising settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule 0: broadcast + multicast.  The rule is
			 * disabled, programmed, then re-enabled — same
			 * sequence bnx2_set_rx_mode() uses. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode whether this suspend has WOL armed. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* NOTE(review): on 5706 A0/A1 the D3hot state bits
			 * are only written when WOL is armed — presumably a
			 * chip erratum; confirm before changing. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3638
3639 static int
3640 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3641 {
3642         u32 val;
3643         int j;
3644
3645         /* Request access to the flash interface. */
3646         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3647         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3648                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3649                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3650                         break;
3651
3652                 udelay(5);
3653         }
3654
3655         if (j >= NVRAM_TIMEOUT_COUNT)
3656                 return -EBUSY;
3657
3658         return 0;
3659 }
3660
3661 static int
3662 bnx2_release_nvram_lock(struct bnx2 *bp)
3663 {
3664         int j;
3665         u32 val;
3666
3667         /* Relinquish nvram interface. */
3668         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3669
3670         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3671                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3672                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3673                         break;
3674
3675                 udelay(5);
3676         }
3677
3678         if (j >= NVRAM_TIMEOUT_COUNT)
3679                 return -EBUSY;
3680
3681         return 0;
3682 }
3683
3684
3685 static int
3686 bnx2_enable_nvram_write(struct bnx2 *bp)
3687 {
3688         u32 val;
3689
3690         val = REG_RD(bp, BNX2_MISC_CFG);
3691         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3692
3693         if (bp->flash_info->flags & BNX2_NV_WREN) {
3694                 int j;
3695
3696                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3697                 REG_WR(bp, BNX2_NVM_COMMAND,
3698                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3699
3700                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3701                         udelay(5);
3702
3703                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3704                         if (val & BNX2_NVM_COMMAND_DONE)
3705                                 break;
3706                 }
3707
3708                 if (j >= NVRAM_TIMEOUT_COUNT)
3709                         return -EBUSY;
3710         }
3711         return 0;
3712 }
3713
3714 static void
3715 bnx2_disable_nvram_write(struct bnx2 *bp)
3716 {
3717         u32 val;
3718
3719         val = REG_RD(bp, BNX2_MISC_CFG);
3720         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3721 }
3722
3723
3724 static void
3725 bnx2_enable_nvram_access(struct bnx2 *bp)
3726 {
3727         u32 val;
3728
3729         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3730         /* Enable both bits, even on read. */
3731         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3732                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3733 }
3734
3735 static void
3736 bnx2_disable_nvram_access(struct bnx2 *bp)
3737 {
3738         u32 val;
3739
3740         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3741         /* Disable both bits, even after read. */
3742         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3743                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3744                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3745 }
3746
3747 static int
3748 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3749 {
3750         u32 cmd;
3751         int j;
3752
3753         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3754                 /* Buffered flash, no erase needed */
3755                 return 0;
3756
3757         /* Build an erase command */
3758         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3759               BNX2_NVM_COMMAND_DOIT;
3760
3761         /* Need to clear DONE bit separately. */
3762         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3763
3764         /* Address of the NVRAM to read from. */
3765         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3766
3767         /* Issue an erase command. */
3768         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3769
3770         /* Wait for completion. */
3771         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3772                 u32 val;
3773
3774                 udelay(5);
3775
3776                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3777                 if (val & BNX2_NVM_COMMAND_DONE)
3778                         break;
3779         }
3780
3781         if (j >= NVRAM_TIMEOUT_COUNT)
3782                 return -EBUSY;
3783
3784         return 0;
3785 }
3786
3787 static int
3788 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3789 {
3790         u32 cmd;
3791         int j;
3792
3793         /* Build the command word. */
3794         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3795
3796         /* Calculate an offset of a buffered flash, not needed for 5709. */
3797         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3798                 offset = ((offset / bp->flash_info->page_size) <<
3799                            bp->flash_info->page_bits) +
3800                           (offset % bp->flash_info->page_size);
3801         }
3802
3803         /* Need to clear DONE bit separately. */
3804         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3805
3806         /* Address of the NVRAM to read from. */
3807         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3808
3809         /* Issue a read command. */
3810         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3811
3812         /* Wait for completion. */
3813         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3814                 u32 val;
3815
3816                 udelay(5);
3817
3818                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3819                 if (val & BNX2_NVM_COMMAND_DONE) {
3820                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3821                         memcpy(ret_val, &v, 4);
3822                         break;
3823                 }
3824         }
3825         if (j >= NVRAM_TIMEOUT_COUNT)
3826                 return -EBUSY;
3827
3828         return 0;
3829 }
3830
3831
3832 static int
3833 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3834 {
3835         u32 cmd;
3836         __be32 val32;
3837         int j;
3838
3839         /* Build the command word. */
3840         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3841
3842         /* Calculate an offset of a buffered flash, not needed for 5709. */
3843         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3844                 offset = ((offset / bp->flash_info->page_size) <<
3845                           bp->flash_info->page_bits) +
3846                          (offset % bp->flash_info->page_size);
3847         }
3848
3849         /* Need to clear DONE bit separately. */
3850         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3851
3852         memcpy(&val32, val, 4);
3853
3854         /* Write the data. */
3855         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3856
3857         /* Address of the NVRAM to write to. */
3858         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3859
3860         /* Issue the write command. */
3861         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3862
3863         /* Wait for completion. */
3864         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3865                 udelay(5);
3866
3867                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3868                         break;
3869         }
3870         if (j >= NVRAM_TIMEOUT_COUNT)
3871                 return -EBUSY;
3872
3873         return 0;
3874 }
3875
/* Identify the attached NVRAM/flash part and record its geometry in
 * bp->flash_info / bp->flash_size.
 *
 * The 5709 has a fixed flash interface; older chips are matched against
 * flash_table[] using either the live NVM_CFG1 configuration (if the
 * interface was already reconfigured) or the strapping pins, in which
 * case the interface is reconfigured here under the NVRAM lock.
 * Returns 0 on success, -ENODEV for an unrecognized part, or the error
 * from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709: single known interface, no strap decoding needed. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* NOTE(review): bit 23 appears to select which strap field
		 * is valid — confirm against the NVM_CFG1 register layout. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop matched an entry: unsupported part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared firmware config; fall back
	 * to the matched table entry's total size. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3958
3959 static int
3960 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3961                 int buf_size)
3962 {
3963         int rc = 0;
3964         u32 cmd_flags, offset32, len32, extra;
3965
3966         if (buf_size == 0)
3967                 return 0;
3968
3969         /* Request access to the flash interface. */
3970         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3971                 return rc;
3972
3973         /* Enable access to flash interface */
3974         bnx2_enable_nvram_access(bp);
3975
3976         len32 = buf_size;
3977         offset32 = offset;
3978         extra = 0;
3979
3980         cmd_flags = 0;
3981
3982         if (offset32 & 3) {
3983                 u8 buf[4];
3984                 u32 pre_len;
3985
3986                 offset32 &= ~3;
3987                 pre_len = 4 - (offset & 3);
3988
3989                 if (pre_len >= len32) {
3990                         pre_len = len32;
3991                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3992                                     BNX2_NVM_COMMAND_LAST;
3993                 }
3994                 else {
3995                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3996                 }
3997
3998                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3999
4000                 if (rc)
4001                         return rc;
4002
4003                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4004
4005                 offset32 += 4;
4006                 ret_buf += pre_len;
4007                 len32 -= pre_len;
4008         }
4009         if (len32 & 3) {
4010                 extra = 4 - (len32 & 3);
4011                 len32 = (len32 + 4) & ~3;
4012         }
4013
4014         if (len32 == 4) {
4015                 u8 buf[4];
4016
4017                 if (cmd_flags)
4018                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4019                 else
4020                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4021                                     BNX2_NVM_COMMAND_LAST;
4022
4023                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4024
4025                 memcpy(ret_buf, buf, 4 - extra);
4026         }
4027         else if (len32 > 0) {
4028                 u8 buf[4];
4029
4030                 /* Read the first word. */
4031                 if (cmd_flags)
4032                         cmd_flags = 0;
4033                 else
4034                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4035
4036                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4037
4038                 /* Advance to the next dword. */
4039                 offset32 += 4;
4040                 ret_buf += 4;
4041                 len32 -= 4;
4042
4043                 while (len32 > 4 && rc == 0) {
4044                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4045
4046                         /* Advance to the next dword. */
4047                         offset32 += 4;
4048                         ret_buf += 4;
4049                         len32 -= 4;
4050                 }
4051
4052                 if (rc)
4053                         return rc;
4054
4055                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4056                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4057
4058                 memcpy(ret_buf, buf, 4 - extra);
4059         }
4060
4061         /* Disable access to flash interface */
4062         bnx2_disable_nvram_access(bp);
4063
4064         bnx2_release_nvram_lock(bp);
4065
4066         return rc;
4067 }
4068
4069 static int
4070 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4071                 int buf_size)
4072 {
4073         u32 written, offset32, len32;
4074         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4075         int rc = 0;
4076         int align_start, align_end;
4077
4078         buf = data_buf;
4079         offset32 = offset;
4080         len32 = buf_size;
4081         align_start = align_end = 0;
4082
4083         if ((align_start = (offset32 & 3))) {
4084                 offset32 &= ~3;
4085                 len32 += align_start;
4086                 if (len32 < 4)
4087                         len32 = 4;
4088                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4089                         return rc;
4090         }
4091
4092         if (len32 & 3) {
4093                 align_end = 4 - (len32 & 3);
4094                 len32 += align_end;
4095                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4096                         return rc;
4097         }
4098
4099         if (align_start || align_end) {
4100                 align_buf = kmalloc(len32, GFP_KERNEL);
4101                 if (align_buf == NULL)
4102                         return -ENOMEM;
4103                 if (align_start) {
4104                         memcpy(align_buf, start, 4);
4105                 }
4106                 if (align_end) {
4107                         memcpy(align_buf + len32 - 4, end, 4);
4108                 }
4109                 memcpy(align_buf + align_start, data_buf, buf_size);
4110                 buf = align_buf;
4111         }
4112
4113         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4114                 flash_buffer = kmalloc(264, GFP_KERNEL);
4115                 if (flash_buffer == NULL) {
4116                         rc = -ENOMEM;
4117                         goto nvram_write_end;
4118                 }
4119         }
4120
4121         written = 0;
4122         while ((written < len32) && (rc == 0)) {
4123                 u32 page_start, page_end, data_start, data_end;
4124                 u32 addr, cmd_flags;
4125                 int i;
4126
4127                 /* Find the page_start addr */
4128                 page_start = offset32 + written;
4129                 page_start -= (page_start % bp->flash_info->page_size);
4130                 /* Find the page_end addr */
4131                 page_end = page_start + bp->flash_info->page_size;
4132                 /* Find the data_start addr */
4133                 data_start = (written == 0) ? offset32 : page_start;
4134                 /* Find the data_end addr */
4135                 data_end = (page_end > offset32 + len32) ?
4136                         (offset32 + len32) : page_end;
4137
4138                 /* Request access to the flash interface. */
4139                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4140                         goto nvram_write_end;
4141
4142                 /* Enable access to flash interface */
4143                 bnx2_enable_nvram_access(bp);
4144
4145                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4146                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4147                         int j;
4148
4149                         /* Read the whole page into the buffer
4150                          * (non-buffer flash only) */
4151                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4152                                 if (j == (bp->flash_info->page_size - 4)) {
4153                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4154                                 }
4155                                 rc = bnx2_nvram_read_dword(bp,
4156                                         page_start + j,
4157                                         &flash_buffer[j],
4158                                         cmd_flags);
4159
4160                                 if (rc)
4161                                         goto nvram_write_end;
4162
4163                                 cmd_flags = 0;
4164                         }
4165                 }
4166
4167                 /* Enable writes to flash interface (unlock write-protect) */
4168                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4169                         goto nvram_write_end;
4170
4171                 /* Loop to write back the buffer data from page_start to
4172                  * data_start */
4173                 i = 0;
4174                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4175                         /* Erase the page */
4176                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4177                                 goto nvram_write_end;
4178
4179                         /* Re-enable the write again for the actual write */
4180                         bnx2_enable_nvram_write(bp);
4181
4182                         for (addr = page_start; addr < data_start;
4183                                 addr += 4, i += 4) {
4184
4185                                 rc = bnx2_nvram_write_dword(bp, addr,
4186                                         &flash_buffer[i], cmd_flags);
4187
4188                                 if (rc != 0)
4189                                         goto nvram_write_end;
4190
4191                                 cmd_flags = 0;
4192                         }
4193                 }
4194
4195                 /* Loop to write the new data from data_start to data_end */
4196                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4197                         if ((addr == page_end - 4) ||
4198                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4199                                  (addr == data_end - 4))) {
4200
4201                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4202                         }
4203                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4204                                 cmd_flags);
4205
4206                         if (rc != 0)
4207                                 goto nvram_write_end;
4208
4209                         cmd_flags = 0;
4210                         buf += 4;
4211                 }
4212
4213                 /* Loop to write back the buffer data from data_end
4214                  * to page_end */
4215                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4216                         for (addr = data_end; addr < page_end;
4217                                 addr += 4, i += 4) {
4218
4219                                 if (addr == page_end-4) {
4220                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4221                                 }
4222                                 rc = bnx2_nvram_write_dword(bp, addr,
4223                                         &flash_buffer[i], cmd_flags);
4224
4225                                 if (rc != 0)
4226                                         goto nvram_write_end;
4227
4228                                 cmd_flags = 0;
4229                         }
4230                 }
4231
4232                 /* Disable writes to flash interface (lock write-protect) */
4233                 bnx2_disable_nvram_write(bp);
4234
4235                 /* Disable access to flash interface */
4236                 bnx2_disable_nvram_access(bp);
4237                 bnx2_release_nvram_lock(bp);
4238
4239                 /* Increment written */
4240                 written += data_end - data_start;
4241         }
4242
4243 nvram_write_end:
4244         kfree(flash_buffer);
4245         kfree(align_buf);
4246         return rc;
4247 }
4248
4249 static void
4250 bnx2_init_remote_phy(struct bnx2 *bp)
4251 {
4252         u32 val;
4253
4254         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4255         if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4256                 return;
4257
4258         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4259         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4260                 return;
4261
4262         if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4263                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4264
4265                 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4266                 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4267                         bp->phy_port = PORT_FIBRE;
4268                 else
4269                         bp->phy_port = PORT_TP;
4270
4271                 if (netif_running(bp->dev)) {
4272                         u32 sig;
4273
4274                         sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4275                               BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4276                         bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4277                 }
4278         }
4279 }
4280
/* Program the GRC address windows used for MSI-X.  Separate-window mode
 * is selected first so that windows 2 and 3 can be pointed independently
 * at the MSI-X vector table and the pending-bit array (PBA).
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4289
/* Soft-reset the chip and re-synchronize with the bootcode firmware.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware in the
 *	WAIT0/WAIT1 handshakes so it knows why the driver is resetting.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV if
 * the chip comes back in the wrong endian mode, or the bnx2_fw_sync()
 * error.  Takes and releases bp->phy_lock internally, so the caller must
 * not hold it.  The statement order below is a hardware-mandated
 * sequence; do not reorder.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets through the MISC command register; older
		 * chips reset through PCI config space (else branch).
		 */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Remote PHY state may have changed across the reset; re-detect
	 * it and reprogram the default link if the port type changed.
	 */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The reset wiped the GRC windows; restore the MSI-X mapping. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4395
/* Bring the chip from freshly-reset state to fully operational:
 * DMA configuration, context memory, internal CPUs/firmware, MAC
 * address, MTU, host coalescing and the receive filter.
 *
 * Returns 0 on success or the error from context/CPU init or the final
 * firmware handshake.  Must be called after a successful
 * bnx2_reset_chip(); the write order is hardware-mandated.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine byte/word swapping and channel counts.  On big
	 * endian hosts the control words need byte swapping too.
	 */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented tuning bits in BNX2_DMA_CONFIG;
	 * meaning not visible here — confirm against chip documentation.
	 */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* Workaround for early 5709 steppings. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the tx backoff generator from the MAC address so
	 * different NICs back off differently.
	 */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing thresholds; high halfword is the value used
	 * while in interrupt context, low halfword otherwise.
	 */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status-block coalescing for the extra MSI-X vectors
	 * (vector 0 was programmed above).
	 */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Final handshake: tell firmware initialization is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the HC command register for fast interrupt-time use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4606
4607 static void
4608 bnx2_clear_ring_states(struct bnx2 *bp)
4609 {
4610         struct bnx2_napi *bnapi;
4611         struct bnx2_tx_ring_info *txr;
4612         struct bnx2_rx_ring_info *rxr;
4613         int i;
4614
4615         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4616                 bnapi = &bp->bnx2_napi[i];
4617                 txr = &bnapi->tx_ring;
4618                 rxr = &bnapi->rx_ring;
4619
4620                 txr->tx_cons = 0;
4621                 txr->hw_tx_cons = 0;
4622                 rxr->rx_prod_bseq = 0;
4623                 rxr->rx_prod = 0;
4624                 rxr->rx_cons = 0;
4625                 rxr->rx_pg_prod = 0;
4626                 rxr->rx_pg_cons = 0;
4627         }
4628 }
4629
4630 static void
4631 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4632 {
4633         u32 val, offset0, offset1, offset2, offset3;
4634         u32 cid_addr = GET_CID_ADDR(cid);
4635
4636         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4637                 offset0 = BNX2_L2CTX_TYPE_XI;
4638                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4639                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4640                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4641         } else {
4642                 offset0 = BNX2_L2CTX_TYPE;
4643                 offset1 = BNX2_L2CTX_CMD_TYPE;
4644                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4645                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4646         }
4647         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4648         bnx2_ctx_wr(bp, cid_addr, offset0, val);
4649
4650         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4651         bnx2_ctx_wr(bp, cid_addr, offset1, val);
4652
4653         val = (u64) txr->tx_desc_mapping >> 32;
4654         bnx2_ctx_wr(bp, cid_addr, offset2, val);
4655
4656         val = (u64) txr->tx_desc_mapping & 0xffffffff;
4657         bnx2_ctx_wr(bp, cid_addr, offset3, val);
4658 }
4659
4660 static void
4661 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4662 {
4663         struct tx_bd *txbd;
4664         u32 cid = TX_CID;
4665         struct bnx2_napi *bnapi;
4666         struct bnx2_tx_ring_info *txr;
4667
4668         bnapi = &bp->bnx2_napi[ring_num];
4669         txr = &bnapi->tx_ring;
4670
4671         if (ring_num == 0)
4672                 cid = TX_CID;
4673         else
4674                 cid = TX_TSS_CID + ring_num - 1;
4675
4676         bp->tx_wake_thresh = bp->tx_ring_size / 2;
4677
4678         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4679
4680         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4681         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4682
4683         txr->tx_prod = 0;
4684         txr->tx_prod_bseq = 0;
4685
4686         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4687         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4688
4689         bnx2_init_tx_context(bp, cid, txr);
4690 }
4691
4692 static void
4693 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4694                      int num_rings)
4695 {
4696         int i;
4697         struct rx_bd *rxbd;
4698
4699         for (i = 0; i < num_rings; i++) {
4700                 int j;
4701
4702                 rxbd = &rx_ring[i][0];
4703                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4704                         rxbd->rx_bd_len = buf_size;
4705                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4706                 }
4707                 if (i == (num_rings - 1))
4708                         j = 0;
4709                 else
4710                         j = i + 1;
4711                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4712                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4713         }
4714 }
4715
/* Set up one rx ring: program its chip context, chain the descriptor
 * pages, optionally configure the page ("jumbo") ring, pre-fill both
 * rings with buffers and ring the initial doorbells.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default rx CID; RSS rings take consecutive
	 * CIDs starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Zero page-buffer size first; overwritten below when the page
	 * ring is in use.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stops early (short ring) if an
	 * allocation fails, leaving prod at the last filled slot.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Same for the regular skb ring. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the producer indices to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4795
/* Initialize every tx and rx ring and, when multiple rings are in use,
 * enable TSS for tx and program the RSS indirection table for rx.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table four entries per 32-bit
		 * word, spreading flows over the non-default rx rings.
		 * NOTE(review): tbl_32 is filled via a host-endian byte
		 * overlay and then written with cpu_to_be32(); verify the
		 * resulting byte order matches what the RXP firmware
		 * expects on big-endian hosts — confirm.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
4840
4841 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4842 {
4843         u32 max, num_rings = 1;
4844
4845         while (ring_size > MAX_RX_DESC_CNT) {
4846                 ring_size -= MAX_RX_DESC_CNT;
4847                 num_rings++;
4848         }
4849         /* round to next power of 2 */
4850         max = max_size;
4851         while ((max & num_rings) == 0)
4852                 max >>= 1;
4853
4854         if (num_rings != max)
4855                 max <<= 1;
4856
4857         return max;
4858 }
4859
4860 static void
4861 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4862 {
4863         u32 rx_size, rx_space, jumbo_size;
4864
4865         /* 8 for CRC and VLAN */
4866         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4867
4868         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4869                 sizeof(struct skb_shared_info);
4870
4871         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4872         bp->rx_pg_ring_size = 0;
4873         bp->rx_max_pg_ring = 0;
4874         bp->rx_max_pg_ring_idx = 0;
4875         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4876                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4877
4878                 jumbo_size = size * pages;
4879                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4880                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4881
4882                 bp->rx_pg_ring_size = jumbo_size;
4883                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4884                                                         MAX_RX_PG_RINGS);
4885                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4886                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4887                 bp->rx_copy_thresh = 0;
4888         }
4889
4890         bp->rx_buf_use_size = rx_size;
4891         /* hw alignment */
4892         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4893         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4894         bp->rx_ring_size = size;
4895         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4896         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4897 }
4898
4899 static void
4900 bnx2_free_tx_skbs(struct bnx2 *bp)
4901 {
4902         int i;
4903
4904         for (i = 0; i < bp->num_tx_rings; i++) {
4905                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4906                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4907                 int j;
4908
4909                 if (txr->tx_buf_ring == NULL)
4910                         continue;
4911
4912                 for (j = 0; j < TX_DESC_CNT; ) {
4913                         struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4914                         struct sk_buff *skb = tx_buf->skb;
4915                         int k, last;
4916
4917                         if (skb == NULL) {
4918                                 j++;
4919                                 continue;
4920                         }
4921
4922                         pci_unmap_single(bp->pdev,
4923                                          pci_unmap_addr(tx_buf, mapping),
4924                         skb_headlen(skb), PCI_DMA_TODEVICE);
4925
4926                         tx_buf->skb = NULL;
4927
4928                         last = skb_shinfo(skb)->nr_frags;
4929                         for (k = 0; k < last; k++) {
4930                                 tx_buf = &txr->tx_buf_ring[j + k + 1];
4931                                 pci_unmap_page(bp->pdev,
4932                                         pci_unmap_addr(tx_buf, mapping),
4933                                         skb_shinfo(skb)->frags[j].size,
4934                                         PCI_DMA_TODEVICE);
4935                         }
4936                         dev_kfree_skb(skb);
4937                         j += k + 1;
4938                 }
4939         }
4940 }
4941
4942 static void
4943 bnx2_free_rx_skbs(struct bnx2 *bp)
4944 {
4945         int i;
4946
4947         for (i = 0; i < bp->num_rx_rings; i++) {
4948                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4949                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4950                 int j;
4951
4952                 if (rxr->rx_buf_ring == NULL)
4953                         return;
4954
4955                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4956                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4957                         struct sk_buff *skb = rx_buf->skb;
4958
4959                         if (skb == NULL)
4960                                 continue;
4961
4962                         pci_unmap_single(bp->pdev,
4963                                          pci_unmap_addr(rx_buf, mapping),
4964                                          bp->rx_buf_use_size,
4965                                          PCI_DMA_FROMDEVICE);
4966
4967                         rx_buf->skb = NULL;
4968
4969                         dev_kfree_skb(skb);
4970                 }
4971                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4972                         bnx2_free_rx_page(bp, rxr, j);
4973         }
4974 }
4975
/* Release every skb (TX and RX) still owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4982
4983 static int
4984 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4985 {
4986         int rc;
4987
4988         rc = bnx2_reset_chip(bp, reset_code);
4989         bnx2_free_skbs(bp);
4990         if (rc)
4991                 return rc;
4992
4993         if ((rc = bnx2_init_chip(bp)) != 0)
4994                 return rc;
4995
4996         bnx2_init_all_rings(bp);
4997         return 0;
4998 }
4999
5000 static int
5001 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5002 {
5003         int rc;
5004
5005         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5006                 return rc;
5007
5008         spin_lock_bh(&bp->phy_lock);
5009         bnx2_init_phy(bp, reset_phy);
5010         bnx2_set_link(bp);
5011         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5012                 bnx2_remote_phy_event(bp);
5013         spin_unlock_bh(&bp->phy_lock);
5014         return 0;
5015 }
5016
/* Self-test: verify read/write and read-only bit behavior of a table of
 * device registers.  Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* For each register: rw_mask holds the bits that must be
	 * writable, ro_mask the bits that must be read-only.  Entries
	 * flagged BNX2_FL_NOT_5709 are skipped on 5709 chips.  The table
	 * is terminated by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Save the original value; it is restored on every exit
		 * path from this iteration.
		 */
		save_val = readl(bp->regview + offset);

		/* Writing 0 must clear the writable bits and leave the
		 * read-only bits untouched.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Writing all ones must set every writable bit while the
		 * read-only bits remain unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the saved value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5187
5188 static int
5189 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5190 {
5191         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5192                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5193         int i;
5194
5195         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5196                 u32 offset;
5197
5198                 for (offset = 0; offset < size; offset += 4) {
5199
5200                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5201
5202                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5203                                 test_pattern[i]) {
5204                                 return -ENODEV;
5205                         }
5206                 }
5207         }
5208         return 0;
5209 }
5210
5211 static int
5212 bnx2_test_memory(struct bnx2 *bp)
5213 {
5214         int ret = 0;
5215         int i;
5216         static struct mem_entry {
5217                 u32   offset;
5218                 u32   len;
5219         } mem_tbl_5706[] = {
5220                 { 0x60000,  0x4000 },
5221                 { 0xa0000,  0x3000 },
5222                 { 0xe0000,  0x4000 },
5223                 { 0x120000, 0x4000 },
5224                 { 0x1a0000, 0x4000 },
5225                 { 0x160000, 0x4000 },
5226                 { 0xffffffff, 0    },
5227         },
5228         mem_tbl_5709[] = {
5229                 { 0x60000,  0x4000 },
5230                 { 0xa0000,  0x3000 },
5231                 { 0xe0000,  0x4000 },
5232                 { 0x120000, 0x4000 },
5233                 { 0x1a0000, 0x4000 },
5234                 { 0xffffffff, 0    },
5235         };
5236         struct mem_entry *mem_tbl;
5237
5238         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5239                 mem_tbl = mem_tbl_5709;
5240         else
5241                 mem_tbl = mem_tbl_5706;
5242
5243         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5244                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5245                         mem_tbl[i].len)) != 0) {
5246                         return ret;
5247                 }
5248         }
5249
5250         return ret;
5251 }
5252
5253 #define BNX2_MAC_LOOPBACK       0
5254 #define BNX2_PHY_LOOPBACK       1
5255
5256 static int
5257 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5258 {
5259         unsigned int pkt_size, num_pkts, i;
5260         struct sk_buff *skb, *rx_skb;
5261         unsigned char *packet;
5262         u16 rx_start_idx, rx_idx;
5263         dma_addr_t map;
5264         struct tx_bd *txbd;
5265         struct sw_bd *rx_buf;
5266         struct l2_fhdr *rx_hdr;
5267         int ret = -ENODEV;
5268         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5269         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5270         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5271
5272         tx_napi = bnapi;
5273
5274         txr = &tx_napi->tx_ring;
5275         rxr = &bnapi->rx_ring;
5276         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5277                 bp->loopback = MAC_LOOPBACK;
5278                 bnx2_set_mac_loopback(bp);
5279         }
5280         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5281                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5282                         return 0;
5283
5284                 bp->loopback = PHY_LOOPBACK;
5285                 bnx2_set_phy_loopback(bp);
5286         }
5287         else
5288                 return -EINVAL;
5289
5290         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5291         skb = netdev_alloc_skb(bp->dev, pkt_size);
5292         if (!skb)
5293                 return -ENOMEM;
5294         packet = skb_put(skb, pkt_size);
5295         memcpy(packet, bp->dev->dev_addr, 6);
5296         memset(packet + 6, 0x0, 8);
5297         for (i = 14; i < pkt_size; i++)
5298                 packet[i] = (unsigned char) (i & 0xff);
5299
5300         map = pci_map_single(bp->pdev, skb->data, pkt_size,
5301                 PCI_DMA_TODEVICE);
5302
5303         REG_WR(bp, BNX2_HC_COMMAND,
5304                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5305
5306         REG_RD(bp, BNX2_HC_COMMAND);
5307
5308         udelay(5);
5309         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5310
5311         num_pkts = 0;
5312
5313         txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5314
5315         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5316         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5317         txbd->tx_bd_mss_nbytes = pkt_size;
5318         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5319
5320         num_pkts++;
5321         txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5322         txr->tx_prod_bseq += pkt_size;
5323
5324         REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5325         REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5326
5327         udelay(100);
5328
5329         REG_WR(bp, BNX2_HC_COMMAND,
5330                bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5331
5332         REG_RD(bp, BNX2_HC_COMMAND);
5333
5334         udelay(5);
5335
5336         pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5337         dev_kfree_skb(skb);
5338
5339         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5340                 goto loopback_test_done;
5341
5342         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5343         if (rx_idx != rx_start_idx + num_pkts) {
5344                 goto loopback_test_done;
5345         }
5346
5347         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5348         rx_skb = rx_buf->skb;
5349
5350         rx_hdr = (struct l2_fhdr *) rx_skb->data;
5351         skb_reserve(rx_skb, BNX2_RX_OFFSET);
5352
5353         pci_dma_sync_single_for_cpu(bp->pdev,
5354                 pci_unmap_addr(rx_buf, mapping),
5355                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5356
5357         if (rx_hdr->l2_fhdr_status &
5358                 (L2_FHDR_ERRORS_BAD_CRC |
5359                 L2_FHDR_ERRORS_PHY_DECODE |
5360                 L2_FHDR_ERRORS_ALIGNMENT |
5361                 L2_FHDR_ERRORS_TOO_SHORT |
5362                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5363
5364                 goto loopback_test_done;
5365         }
5366
5367         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5368                 goto loopback_test_done;
5369         }
5370
5371         for (i = 14; i < pkt_size; i++) {
5372                 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5373                         goto loopback_test_done;
5374                 }
5375         }
5376
5377         ret = 0;
5378
5379 loopback_test_done:
5380         bp->loopback = 0;
5381         return ret;
5382 }
5383
5384 #define BNX2_MAC_LOOPBACK_FAILED        1
5385 #define BNX2_PHY_LOOPBACK_FAILED        2
5386 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5387                                          BNX2_PHY_LOOPBACK_FAILED)
5388
5389 static int
5390 bnx2_test_loopback(struct bnx2 *bp)
5391 {
5392         int rc = 0;
5393
5394         if (!netif_running(bp->dev))
5395                 return BNX2_LOOPBACK_FAILED;
5396
5397         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5398         spin_lock_bh(&bp->phy_lock);
5399         bnx2_init_phy(bp, 1);
5400         spin_unlock_bh(&bp->phy_lock);
5401         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5402                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5403         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5404                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5405         return rc;
5406 }
5407
5408 #define NVRAM_SIZE 0x200
5409 #define CRC32_RESIDUAL 0xdebb20e3
5410
5411 static int
5412 bnx2_test_nvram(struct bnx2 *bp)
5413 {
5414         __be32 buf[NVRAM_SIZE / 4];
5415         u8 *data = (u8 *) buf;
5416         int rc = 0;
5417         u32 magic, csum;
5418
5419         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5420                 goto test_nvram_done;
5421
5422         magic = be32_to_cpu(buf[0]);
5423         if (magic != 0x669955aa) {
5424                 rc = -ENODEV;
5425                 goto test_nvram_done;
5426         }
5427
5428         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5429                 goto test_nvram_done;
5430
5431         csum = ether_crc_le(0x100, data);
5432         if (csum != CRC32_RESIDUAL) {
5433                 rc = -ENODEV;
5434                 goto test_nvram_done;
5435         }
5436
5437         csum = ether_crc_le(0x100, data + 0x100);
5438         if (csum != CRC32_RESIDUAL) {
5439                 rc = -ENODEV;
5440         }
5441
5442 test_nvram_done:
5443         return rc;
5444 }
5445
5446 static int
5447 bnx2_test_link(struct bnx2 *bp)
5448 {
5449         u32 bmsr;
5450
5451         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5452                 if (bp->link_up)
5453                         return 0;
5454                 return -ENODEV;
5455         }
5456         spin_lock_bh(&bp->phy_lock);
5457         bnx2_enable_bmsr1(bp);
5458         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5459         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5460         bnx2_disable_bmsr1(bp);
5461         spin_unlock_bh(&bp->phy_lock);
5462
5463         if (bmsr & BMSR_LSTATUS) {
5464                 return 0;
5465         }
5466         return -ENODEV;
5467 }
5468
5469 static int
5470 bnx2_test_intr(struct bnx2 *bp)
5471 {
5472         int i;
5473         u16 status_idx;
5474
5475         if (!netif_running(bp->dev))
5476                 return -ENODEV;
5477
5478         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5479
5480         /* This register is not touched during run-time. */
5481         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5482         REG_RD(bp, BNX2_HC_COMMAND);
5483
5484         for (i = 0; i < 10; i++) {
5485                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5486                         status_idx) {
5487
5488                         break;
5489                 }
5490
5491                 msleep_interruptible(10);
5492         }
5493         if (i < 10)
5494                 return 0;
5495
5496         return -ENODEV;
5497 }
5498
/* Determining link for parallel detection.
 *
 * Returns 1 when the SerDes PHY sees a usable signal from a non-autoneg
 * partner (so the caller may force the link), 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detected on the wire -> no link possible. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice, discarding the first (possibly stale/latched)
	 * value -- same double-read pattern used elsewhere for PHY
	 * status registers in this driver.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5530
/* Periodic SerDes link maintenance for 5706 chips: handles parallel
 * detection (forcing 1G when the partner does not autoneg) and recovery
 * when sync is lost.  Called from bnx2_timer().
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still pending; wait it out. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg has not brought the link up.  If the
			 * partner looks like a non-autoneg device, force
			 * 1G full duplex (parallel detection).
			 */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection.  Bit 0x20 of
		 * shadow register 0x15 presumably indicates the partner
		 * now supports autoneg (TODO: confirm against the PHY
		 * datasheet); if so, switch back to autoneg mode.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = bp->timer_interval;

	if (check_link) {
		u32 val;

		/* Read AN_DBG twice, discarding the first (possibly
		 * stale/latched) value.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link is reported up but sync has been lost:
			 * force the link down once, then let
			 * bnx2_set_link() re-evaluate on the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
5592
/* Periodic SerDes link maintenance for 5708 chips: while the link is
 * down with autoneg enabled, alternate between forced 2.5G and autoneg.
 * Called from bnx2_timer().
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* With a remote PHY, link management is handled by firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	/* Nothing to alternate to on a PHY without 2.5G support. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed so far; try forced 2.5G with a
			 * shorter retry interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed; go back to autoneg and give
			 * it two timer ticks before re-checking.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
5625
/* Periodic driver timer: sends the firmware heartbeat, refreshes the
 * firmware RX drop counter, applies the 5708 statistics workaround, and
 * runs the per-chip SerDes link maintenance.  Re-arms itself.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep the timer running) while interrupts
	 * are disabled, e.g. during a reset.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5657
5658 static int
5659 bnx2_request_irq(struct bnx2 *bp)
5660 {
5661         unsigned long flags;
5662         struct bnx2_irq *irq;
5663         int rc = 0, i;
5664
5665         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5666                 flags = 0;
5667         else
5668                 flags = IRQF_SHARED;
5669
5670         for (i = 0; i < bp->irq_nvecs; i++) {
5671                 irq = &bp->irq_tbl[i];
5672                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5673                                  &bp->bnx2_napi[i]);
5674                 if (rc)
5675                         break;
5676                 irq->requested = 1;
5677         }
5678         return rc;
5679 }
5680
5681 static void
5682 bnx2_free_irq(struct bnx2 *bp)
5683 {
5684         struct bnx2_irq *irq;
5685         int i;
5686
5687         for (i = 0; i < bp->irq_nvecs; i++) {
5688                 irq = &bp->irq_tbl[i];
5689                 if (irq->requested)
5690                         free_irq(irq->vector, &bp->bnx2_napi[i]);
5691                 irq->requested = 0;
5692         }
5693         if (bp->flags & BNX2_FLAG_USING_MSI)
5694                 pci_disable_msi(bp->pdev);
5695         else if (bp->flags & BNX2_FLAG_USING_MSIX)
5696                 pci_disable_msix(bp->pdev);
5697
5698         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5699 }
5700
5701 static void
5702 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5703 {
5704         int i, rc;
5705         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5706
5707         bnx2_setup_msix_tbl(bp);
5708         REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5709         REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5710         REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5711
5712         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5713                 msix_ent[i].entry = i;
5714                 msix_ent[i].vector = 0;
5715
5716                 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5717                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5718         }
5719
5720         rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5721         if (rc != 0)
5722                 return;
5723
5724         bp->irq_nvecs = msix_vecs;
5725         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5726         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5727                 bp->irq_tbl[i].vector = msix_ent[i].vector;
5728 }
5729
5730 static void
5731 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5732 {
5733         int cpus = num_online_cpus();
5734         int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
5735
5736         bp->irq_tbl[0].handler = bnx2_interrupt;
5737         strcpy(bp->irq_tbl[0].name, bp->dev->name);
5738         bp->irq_nvecs = 1;
5739         bp->irq_tbl[0].vector = bp->pdev->irq;
5740
5741         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5742                 bnx2_enable_msix(bp, msix_vecs);
5743
5744         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5745             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5746                 if (pci_enable_msi(bp->pdev) == 0) {
5747                         bp->flags |= BNX2_FLAG_USING_MSI;
5748                         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5749                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5750                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5751                         } else
5752                                 bp->irq_tbl[0].handler = bnx2_msi;
5753
5754                         bp->irq_tbl[0].vector = bp->pdev->irq;
5755                 }
5756         }
5757         bp->num_tx_rings = 1;
5758         bp->num_rx_rings = bp->irq_nvecs;
5759 }
5760
/* Called with rtnl_lock.
 * Bring the interface up: power the chip, choose an interrupt mode,
 * allocate ring memory, request the IRQ and initialize the NIC.  On
 * any failure, everything acquired so far is torn down in reverse.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        /* No carrier until the link state machine reports link up. */
        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        /* Choose MSI-X/MSI/INTx before allocating memory: the ring
         * allocations depend on the vector count selected here.
         */
        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc) {
                bnx2_napi_disable(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_request_irq(bp);

        if (rc) {
                bnx2_napi_disable(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp, 1);

        if (rc) {
                bnx2_napi_disable(bp);
                bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* Fall back to INTx and reinitialize the chip
                         * under the new interrupt mode.
                         */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                bnx2_napi_disable(bp);
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_start_queue(dev);

        return 0;
}
5846
/* Workqueue handler that recovers the device after a TX timeout
 * (scheduled from bnx2_tx_timeout()).  Runs in process context.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        /* The device may have been closed after the work was queued. */
        if (!netif_running(bp->dev))
                return;

        bnx2_netif_stop(bp);

        bnx2_init_nic(bp, 1);

        /* NOTE(review): intr_sem is left at 1 here, presumably cleared
         * by the interrupt/enable path after restart — confirm against
         * bnx2_netif_start()/bnx2_enable_int().
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
}
5862
5863 static void
5864 bnx2_tx_timeout(struct net_device *dev)
5865 {
5866         struct bnx2 *bp = netdev_priv(dev);
5867
5868         /* This allows the netif to be shutdown gracefully before resetting */
5869         schedule_work(&bp->reset_task);
5870 }
5871
#ifdef BCM_VLAN
/* Called with rtnl_lock held. */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *priv = netdev_priv(dev);

        /* Quiesce the device while the VLAN group pointer and the RX
         * filtering mode are updated, then bring it back up.
         */
        bnx2_netif_stop(priv);
        priv->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);
        bnx2_netif_start(priv);
}
#endif
5887
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: build one TX BD for the linear skb data plus
 * one per page fragment, then ring the chip's doorbell registers.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

        /* The queue should have been stopped before the ring got this
         * full; reaching this path indicates a flow-control bug.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        /* VLAN tag (if any) rides in the upper 16 bits of the flags. */
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size)) {
                /* LSO: encode MSS, TCP option length and (for IPv6) the
                 * transport header offset into the BD fields.
                 */
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* Extension headers present: the 8-byte
                                 * unit offset is split across the flag
                                 * and MSS fields.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Prepare the pseudo-header checksum; the chip
                         * computes the rest during segmentation.
                         */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        /* NOTE(review): pci_map_single()/pci_map_page() results are not
         * checked for mapping failure here — typical for this era, but
         * worth confirming on platforms where DMA mapping can fail.
         */
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One BD per page fragment, all sharing the same flags/MSS. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD of this packet. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Kick the chip: producer index and byte-sequence doorbells. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;
        dev->trans_start = jiffies;

        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                /* Stop, then re-check: bnx2_tx_int() may have freed
                 * descriptors between the availability check and the
                 * stop, in which case the queue is woken right back up.
                 */
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
6029
/* Called with rtnl_lock.
 * Bring the interface down: stop interrupts/NAPI/timer, tell the
 * firmware why we are going down, release resources and power down.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Make sure a pending reset_task cannot run concurrently with
         * (or after) the teardown below.
         */
        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        /* Pick the unload reason so the firmware arms (or skips)
         * wake-on-LAN accordingly.
         */
        if (bp->flags & BNX2_FLAG_NO_WOL)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6057
/* Combine a 64-bit hardware counter split into _hi/_lo words.  The
 * whole expansion is parenthesized so the macro is safe inside any
 * surrounding expression (the old form ended in "... + lo" and would
 * bind wrongly next to a multiplication or cast).
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits fit in unsigned long. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6070
6071 static struct net_device_stats *
6072 bnx2_get_stats(struct net_device *dev)
6073 {
6074         struct bnx2 *bp = netdev_priv(dev);
6075         struct statistics_block *stats_blk = bp->stats_blk;
6076         struct net_device_stats *net_stats = &bp->net_stats;
6077
6078         if (bp->stats_blk == NULL) {
6079                 return net_stats;
6080         }
6081         net_stats->rx_packets =
6082                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6083                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6084                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6085
6086         net_stats->tx_packets =
6087                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6088                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6089                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6090
6091         net_stats->rx_bytes =
6092                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6093
6094         net_stats->tx_bytes =
6095                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6096
6097         net_stats->multicast =
6098                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6099
6100         net_stats->collisions =
6101                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6102
6103         net_stats->rx_length_errors =
6104                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6105                 stats_blk->stat_EtherStatsOverrsizePkts);
6106
6107         net_stats->rx_over_errors =
6108                 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6109
6110         net_stats->rx_frame_errors =
6111                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6112
6113         net_stats->rx_crc_errors =
6114                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6115
6116         net_stats->rx_errors = net_stats->rx_length_errors +
6117                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6118                 net_stats->rx_crc_errors;
6119
6120         net_stats->tx_aborted_errors =
6121                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6122                 stats_blk->stat_Dot3StatsLateCollisions);
6123
6124         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6125             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6126                 net_stats->tx_carrier_errors = 0;
6127         else {
6128                 net_stats->tx_carrier_errors =
6129                         (unsigned long)
6130                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
6131         }
6132
6133         net_stats->tx_errors =
6134                 (unsigned long)
6135                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6136                 +
6137                 net_stats->tx_aborted_errors +
6138                 net_stats->tx_carrier_errors;
6139
6140         net_stats->rx_missed_errors =
6141                 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6142                 stats_blk->stat_FwRxDrop);
6143
6144         return net_stats;
6145 }
6146
6147 /* All ethtool functions called with rtnl_lock */
6148
6149 static int
6150 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6151 {
6152         struct bnx2 *bp = netdev_priv(dev);
6153         int support_serdes = 0, support_copper = 0;
6154
6155         cmd->supported = SUPPORTED_Autoneg;
6156         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6157                 support_serdes = 1;
6158                 support_copper = 1;
6159         } else if (bp->phy_port == PORT_FIBRE)
6160                 support_serdes = 1;
6161         else
6162                 support_copper = 1;
6163
6164         if (support_serdes) {
6165                 cmd->supported |= SUPPORTED_1000baseT_Full |
6166                         SUPPORTED_FIBRE;
6167                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6168                         cmd->supported |= SUPPORTED_2500baseX_Full;
6169
6170         }
6171         if (support_copper) {
6172                 cmd->supported |= SUPPORTED_10baseT_Half |
6173                         SUPPORTED_10baseT_Full |
6174                         SUPPORTED_100baseT_Half |
6175                         SUPPORTED_100baseT_Full |
6176                         SUPPORTED_1000baseT_Full |
6177                         SUPPORTED_TP;
6178
6179         }
6180
6181         spin_lock_bh(&bp->phy_lock);
6182         cmd->port = bp->phy_port;
6183         cmd->advertising = bp->advertising;
6184
6185         if (bp->autoneg & AUTONEG_SPEED) {
6186                 cmd->autoneg = AUTONEG_ENABLE;
6187         }
6188         else {
6189                 cmd->autoneg = AUTONEG_DISABLE;
6190         }
6191
6192         if (netif_carrier_ok(dev)) {
6193                 cmd->speed = bp->line_speed;
6194                 cmd->duplex = bp->duplex;
6195         }
6196         else {
6197                 cmd->speed = -1;
6198                 cmd->duplex = -1;
6199         }
6200         spin_unlock_bh(&bp->phy_lock);
6201
6202         cmd->transceiver = XCVR_INTERNAL;
6203         cmd->phy_address = bp->phy_addr;
6204
6205         return 0;
6206 }
6207
/* ethtool set_settings hook (rtnl_lock held).
 * Validates the requested port/autoneg/speed combination and, if
 * acceptable, commits it and reprograms the PHY.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* The port type may only change when a media-switching remote
         * PHY is present.
         */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 speeds are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything the
                         * selected medium supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex mode. */
                if (cmd->port == PORT_FIBRE) {
                        /* Fibre supports only 1000/2500 full duplex. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                /* Forced gigabit speeds are not allowed on copper. */
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Commit the validated settings, then reprogram the PHY. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6291
6292 static void
6293 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6294 {
6295         struct bnx2 *bp = netdev_priv(dev);
6296
6297         strcpy(info->driver, DRV_MODULE_NAME);
6298         strcpy(info->version, DRV_MODULE_VERSION);
6299         strcpy(info->bus_info, pci_name(bp->pdev));
6300         strcpy(info->fw_version, bp->fw_version);
6301 }
6302
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

/* ethtool get_regs_len hook: the dump size is fixed. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6310
6311 static void
6312 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6313 {
6314         u32 *p = _p, i, offset;
6315         u8 *orig_p = _p;
6316         struct bnx2 *bp = netdev_priv(dev);
6317         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6318                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6319                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6320                                  0x1040, 0x1048, 0x1080, 0x10a4,
6321                                  0x1400, 0x1490, 0x1498, 0x14f0,
6322                                  0x1500, 0x155c, 0x1580, 0x15dc,
6323                                  0x1600, 0x1658, 0x1680, 0x16d8,
6324                                  0x1800, 0x1820, 0x1840, 0x1854,
6325                                  0x1880, 0x1894, 0x1900, 0x1984,
6326                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6327                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6328                                  0x2000, 0x2030, 0x23c0, 0x2400,
6329                                  0x2800, 0x2820, 0x2830, 0x2850,
6330                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6331                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6332                                  0x4080, 0x4090, 0x43c0, 0x4458,
6333                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6334                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6335                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6336                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6337                                  0x6800, 0x6848, 0x684c, 0x6860,
6338                                  0x6888, 0x6910, 0x8000 };
6339
6340         regs->version = 0;
6341
6342         memset(p, 0, BNX2_REGDUMP_LEN);
6343
6344         if (!netif_running(bp->dev))
6345                 return;
6346
6347         i = 0;
6348         offset = reg_boundaries[0];
6349         p += offset;
6350         while (offset < BNX2_REGDUMP_LEN) {
6351                 *p++ = REG_RD(bp, offset);
6352                 offset += 4;
6353                 if (offset == reg_boundaries[i + 1]) {
6354                         offset = reg_boundaries[i + 2];
6355                         p = (u32 *) (orig_p + offset);
6356                         i += 2;
6357                 }
6358         }
6359 }
6360
6361 static void
6362 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6363 {
6364         struct bnx2 *bp = netdev_priv(dev);
6365
6366         if (bp->flags & BNX2_FLAG_NO_WOL) {
6367                 wol->supported = 0;
6368                 wol->wolopts = 0;
6369         }
6370         else {
6371                 wol->supported = WAKE_MAGIC;
6372                 if (bp->wol)
6373                         wol->wolopts = WAKE_MAGIC;
6374                 else
6375                         wol->wolopts = 0;
6376         }
6377         memset(&wol->sopass, 0, sizeof(wol->sopass));
6378 }
6379
6380 static int
6381 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6382 {
6383         struct bnx2 *bp = netdev_priv(dev);
6384
6385         if (wol->wolopts & ~WAKE_MAGIC)
6386                 return -EINVAL;
6387
6388         if (wol->wolopts & WAKE_MAGIC) {
6389                 if (bp->flags & BNX2_FLAG_NO_WOL)
6390                         return -EINVAL;
6391
6392                 bp->wol = 1;
6393         }
6394         else {
6395                 bp->wol = 0;
6396         }
6397         return 0;
6398 }
6399
/* ethtool nway_reset hook: restart autonegotiation. */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        /* Restarting autoneg only makes sense if it is enabled. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                /* Remote PHY: delegate link setup to the firmware. */
                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Lock must be dropped across the sleep; reacquired
                 * below before touching PHY state again.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the timer that watches SerDes autoneg progress. */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6442
6443 static int
6444 bnx2_get_eeprom_len(struct net_device *dev)
6445 {
6446         struct bnx2 *bp = netdev_priv(dev);
6447
6448         if (bp->flash_info == NULL)
6449                 return 0;
6450
6451         return (int) bp->flash_size;
6452 }
6453
6454 static int
6455 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6456                 u8 *eebuf)
6457 {
6458         struct bnx2 *bp = netdev_priv(dev);
6459         int rc;
6460
6461         /* parameters already validated in ethtool_get_eeprom */
6462
6463         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6464
6465         return rc;
6466 }
6467
6468 static int
6469 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6470                 u8 *eebuf)
6471 {
6472         struct bnx2 *bp = netdev_priv(dev);
6473         int rc;
6474
6475         /* parameters already validated in ethtool_set_eeprom */
6476
6477         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6478
6479         return rc;
6480 }
6481
6482 static int
6483 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6484 {
6485         struct bnx2 *bp = netdev_priv(dev);
6486
6487         memset(coal, 0, sizeof(struct ethtool_coalesce));
6488
6489         coal->rx_coalesce_usecs = bp->rx_ticks;
6490         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6491         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6492         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6493
6494         coal->tx_coalesce_usecs = bp->tx_ticks;
6495         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6496         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6497         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6498
6499         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6500
6501         return 0;
6502 }
6503
6504 static int
6505 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6506 {
6507         struct bnx2 *bp = netdev_priv(dev);
6508
6509         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6510         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6511
6512         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6513         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6514
6515         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6516         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6517
6518         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6519         if (bp->rx_quick_cons_trip_int > 0xff)
6520                 bp->rx_quick_cons_trip_int = 0xff;
6521
6522         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6523         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6524
6525         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6526         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6527
6528         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6529         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6530
6531         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6532         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6533                 0xff;
6534
6535         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6536         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6537                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6538                         bp->stats_ticks = USEC_PER_SEC;
6539         }
6540         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6541                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6542         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6543
6544         if (netif_running(bp->dev)) {
6545                 bnx2_netif_stop(bp);
6546                 bnx2_init_nic(bp, 0);
6547                 bnx2_netif_start(bp);
6548         }
6549
6550         return 0;
6551 }
6552
6553 static void
6554 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6555 {
6556         struct bnx2 *bp = netdev_priv(dev);
6557
6558         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6559         ering->rx_mini_max_pending = 0;
6560         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6561
6562         ering->rx_pending = bp->rx_ring_size;
6563         ering->rx_mini_pending = 0;
6564         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6565
6566         ering->tx_max_pending = MAX_TX_DESC_CNT;
6567         ering->tx_pending = bp->tx_ring_size;
6568 }
6569
6570 static int
6571 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6572 {
6573         if (netif_running(bp->dev)) {
6574                 bnx2_netif_stop(bp);
6575                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6576                 bnx2_free_skbs(bp);
6577                 bnx2_free_mem(bp);
6578         }
6579
6580         bnx2_set_rx_ring_size(bp, rx);
6581         bp->tx_ring_size = tx;
6582
6583         if (netif_running(bp->dev)) {
6584                 int rc;
6585
6586                 rc = bnx2_alloc_mem(bp);
6587                 if (rc)
6588                         return rc;
6589                 bnx2_init_nic(bp, 0);
6590                 bnx2_netif_start(bp);
6591         }
6592         return 0;
6593 }
6594
6595 static int
6596 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6597 {
6598         struct bnx2 *bp = netdev_priv(dev);
6599         int rc;
6600
6601         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6602                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6603                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6604
6605                 return -EINVAL;
6606         }
6607         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6608         return rc;
6609 }
6610
6611 static void
6612 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6613 {
6614         struct bnx2 *bp = netdev_priv(dev);
6615
6616         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6617         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6618         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6619 }
6620
6621 static int
6622 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6623 {
6624         struct bnx2 *bp = netdev_priv(dev);
6625
6626         bp->req_flow_ctrl = 0;
6627         if (epause->rx_pause)
6628                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6629         if (epause->tx_pause)
6630                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6631
6632         if (epause->autoneg) {
6633                 bp->autoneg |= AUTONEG_FLOW_CTRL;
6634         }
6635         else {
6636                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6637         }
6638
6639         spin_lock_bh(&bp->phy_lock);
6640
6641         bnx2_setup_phy(bp, bp->phy_port);
6642
6643         spin_unlock_bh(&bp->phy_lock);
6644
6645         return 0;
6646 }
6647
6648 static u32
6649 bnx2_get_rx_csum(struct net_device *dev)
6650 {
6651         struct bnx2 *bp = netdev_priv(dev);
6652
6653         return bp->rx_csum;
6654 }
6655
6656 static int
6657 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6658 {
6659         struct bnx2 *bp = netdev_priv(dev);
6660
6661         bp->rx_csum = data;
6662         return 0;
6663 }
6664
6665 static int
6666 bnx2_set_tso(struct net_device *dev, u32 data)
6667 {
6668         struct bnx2 *bp = netdev_priv(dev);
6669
6670         if (data) {
6671                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6672                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6673                         dev->features |= NETIF_F_TSO6;
6674         } else
6675                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6676                                    NETIF_F_TSO_ECN);
6677         return 0;
6678 }
6679
/* Number of entries in bnx2_stats_str_arr (and, presumably, in the
 * matching stats-offset tables defined below — verify they stay in
 * sync when adding entries).
 */
#define BNX2_NUM_STATS 46

/* ethtool -S statistic names.  The order is significant: each entry
 * must line up with the corresponding statistics_block offset table.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
6732
/* Byte offset of a statistics_block field expressed in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offsets into the hardware statistics block, one per
 * entry in bnx2_stats_str_arr[].  For 64-bit counters this is the
 * offset of the high word; the low word immediately follows it (see
 * bnx2_get_ethtool_stats()).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6783
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter widths in bytes for 5706 A0-A2 and 5708 A0; a width of
 * 0 marks a counter that is unreliable on these revisions and is
 * reported as zero by bnx2_get_ethtool_stats().
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Per-counter widths for all later chip revisions; only
 * stat_IfHCInBadOctets remains skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6802
/* Number of ethtool self-test results. */
#define BNX2_NUM_TESTS 6

/* Self-test names reported via ETH_SS_TEST; the order matches the
 * buf[] result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6815
6816 static int
6817 bnx2_get_sset_count(struct net_device *dev, int sset)
6818 {
6819         switch (sset) {
6820         case ETH_SS_TEST:
6821                 return BNX2_NUM_TESTS;
6822         case ETH_SS_STATS:
6823                 return BNX2_NUM_STATS;
6824         default:
6825                 return -EOPNOTSUPP;
6826         }
6827 }
6828
/* ethtool self_test handler.  Runs the optional offline tests
 * (register, memory, loopback) followed by the online tests (NVRAM,
 * interrupt, link).  A non-zero value in buf[i] marks test i as
 * failed, in the order given by bnx2_tests_str_arr[].
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access to the chip:
		 * stop traffic, put the chip in diagnostic mode, and
		 * release all posted buffers.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* The loopback test's return value is stored directly
		 * as the result (a non-zero failure code).
		 */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* Device was up: restore normal operation
			 * after the diagnostic reset.
			 */
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6884
6885 static void
6886 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6887 {
6888         switch (stringset) {
6889         case ETH_SS_STATS:
6890                 memcpy(buf, bnx2_stats_str_arr,
6891                         sizeof(bnx2_stats_str_arr));
6892                 break;
6893         case ETH_SS_TEST:
6894                 memcpy(buf, bnx2_tests_str_arr,
6895                         sizeof(bnx2_tests_str_arr));
6896                 break;
6897         }
6898 }
6899
/* ethtool get_ethtool_stats handler.  Copies counters out of the
 * hardware statistics block into buf[], widened to u64.  A per-chip
 * length table describes each counter: 0 = skip (errata, report 0),
 * 4 = single 32-bit word, otherwise a 64-bit counter stored as a
 * high word followed by a low word.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No statistics block has been allocated yet (device never
	 * opened): report all zeros.
	 */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions skip extra errata-affected counters. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
6940
/* ethtool phys_id handler.  Blinks the port LED for `data` seconds
 * (default 2) so the physical port can be identified, then restores
 * the previous LED configuration.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take manual control of the LED, remembering the old mode
	 * so it can be restored when we are done.
	 */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Toggle every 500 ms: two iterations per requested second. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Let the user abort the blinking early with a signal. */
		if (signal_pending(current))
			break;
	}
	/* Return LED control to the hardware and restore the mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
6974
6975 static int
6976 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6977 {
6978         struct bnx2 *bp = netdev_priv(dev);
6979
6980         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6981                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6982         else
6983                 return (ethtool_op_set_tx_csum(dev, data));
6984 }
6985
/* ethtool operations table wired up in bnx2_init_one(); handlers not
 * listed here fall back to the ethtool core defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7016
/* Called with rtnl_lock */
/* MII ioctl handler.  SIOCGMIIPHY reports the PHY address and falls
 * through to read a register; SIOCGMIIREG/SIOCSMIIREG read/write a
 * PHY register.  PHY access is refused when the PHY is owned by
 * remote (management) firmware, and requires a running device since
 * MDIO access needs an initialized chip.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes all MDIO access to the PHY. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Register writes can alter link behavior; require
		 * admin privileges.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7070
7071 /* Called with rtnl_lock */
7072 static int
7073 bnx2_change_mac_addr(struct net_device *dev, void *p)
7074 {
7075         struct sockaddr *addr = p;
7076         struct bnx2 *bp = netdev_priv(dev);
7077
7078         if (!is_valid_ether_addr(addr->sa_data))
7079                 return -EINVAL;
7080
7081         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7082         if (netif_running(dev))
7083                 bnx2_set_mac_addr(bp);
7084
7085         return 0;
7086 }
7087
7088 /* Called with rtnl_lock */
7089 static int
7090 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7091 {
7092         struct bnx2 *bp = netdev_priv(dev);
7093
7094         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7095                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7096                 return -EINVAL;
7097
7098         dev->mtu = new_mtu;
7099         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7100 }
7101
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: service the device with its IRQ masked so that
 * netconsole and similar facilities can make progress when normal
 * interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
7113
/* Determine whether a 5709 port uses copper or SERDES (fiber) media
 * and set BNX2_PHY_FLAG_SERDES accordingly.  Single-media packages
 * are identified directly by the bond ID; dual-media parts are
 * decoded from the PHY strap (or its software override), which maps
 * differently for PCI function 0 and function 1.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;		/* copper-only package */
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;		/* SERDES-only package */
	}

	/* Dual-media: prefer the software strap override if enabled. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* Strap-to-media mapping differs per PCI function. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7151
/* Probe-time helper: determine the PCI/PCI-X bus type, clock speed,
 * and width from the chip's PCICFG status registers, recording the
 * results in bp->flags and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* On PCI-X the exact clock is read from the clock
		 * control register and rounded down to a standard
		 * bus speed.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7203
7204 static int __devinit
7205 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7206 {
7207         struct bnx2 *bp;
7208         unsigned long mem_len;
7209         int rc, i, j;
7210         u32 reg;
7211         u64 dma_mask, persist_dma_mask;
7212
7213         SET_NETDEV_DEV(dev, &pdev->dev);
7214         bp = netdev_priv(dev);
7215
7216         bp->flags = 0;
7217         bp->phy_flags = 0;
7218
7219         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7220         rc = pci_enable_device(pdev);
7221         if (rc) {
7222                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7223                 goto err_out;
7224         }
7225
7226         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7227                 dev_err(&pdev->dev,
7228                         "Cannot find PCI device base address, aborting.\n");
7229                 rc = -ENODEV;
7230                 goto err_out_disable;
7231         }
7232
7233         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7234         if (rc) {
7235                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7236                 goto err_out_disable;
7237         }
7238
7239         pci_set_master(pdev);
7240         pci_save_state(pdev);
7241
7242         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7243         if (bp->pm_cap == 0) {
7244                 dev_err(&pdev->dev,
7245                         "Cannot find power management capability, aborting.\n");
7246                 rc = -EIO;
7247                 goto err_out_release;
7248         }
7249
7250         bp->dev = dev;
7251         bp->pdev = pdev;
7252
7253         spin_lock_init(&bp->phy_lock);
7254         spin_lock_init(&bp->indirect_lock);
7255         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7256
7257         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7258         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7259         dev->mem_end = dev->mem_start + mem_len;
7260         dev->irq = pdev->irq;
7261
7262         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7263
7264         if (!bp->regview) {
7265                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7266                 rc = -ENOMEM;
7267                 goto err_out_release;
7268         }
7269
7270         /* Configure byte swap and enable write to the reg_window registers.
7271          * Rely on CPU to do target byte swapping on big endian systems
7272          * The chip's target access swapping will not swap all accesses
7273          */
7274         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7275                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7276                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7277
7278         bnx2_set_power_state(bp, PCI_D0);
7279
7280         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7281
7282         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7283                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7284                         dev_err(&pdev->dev,
7285                                 "Cannot find PCIE capability, aborting.\n");
7286                         rc = -EIO;
7287                         goto err_out_unmap;
7288                 }
7289                 bp->flags |= BNX2_FLAG_PCIE;
7290                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7291                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7292         } else {
7293                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7294                 if (bp->pcix_cap == 0) {
7295                         dev_err(&pdev->dev,
7296                                 "Cannot find PCIX capability, aborting.\n");
7297                         rc = -EIO;
7298                         goto err_out_unmap;
7299                 }
7300         }
7301
7302         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7303                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7304                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7305         }
7306
7307         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7308                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7309                         bp->flags |= BNX2_FLAG_MSI_CAP;
7310         }
7311
7312         /* 5708 cannot support DMA addresses > 40-bit.  */
7313         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7314                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7315         else
7316                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7317
7318         /* Configure DMA attributes. */
7319         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7320                 dev->features |= NETIF_F_HIGHDMA;
7321                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7322                 if (rc) {
7323                         dev_err(&pdev->dev,
7324                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7325                         goto err_out_unmap;
7326                 }
7327         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7328                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7329                 goto err_out_unmap;
7330         }
7331
7332         if (!(bp->flags & BNX2_FLAG_PCIE))
7333                 bnx2_get_pci_speed(bp);
7334
7335         /* 5706A0 may falsely detect SERR and PERR. */
7336         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7337                 reg = REG_RD(bp, PCI_COMMAND);
7338                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7339                 REG_WR(bp, PCI_COMMAND, reg);
7340         }
7341         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7342                 !(bp->flags & BNX2_FLAG_PCIX)) {
7343
7344                 dev_err(&pdev->dev,
7345                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7346                 goto err_out_unmap;
7347         }
7348
7349         bnx2_init_nvram(bp);
7350
7351         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7352
7353         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7354             BNX2_SHM_HDR_SIGNATURE_SIG) {
7355                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7356
7357                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7358         } else
7359                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7360
7361         /* Get the permanent MAC address.  First we need to make sure the
7362          * firmware is actually running.
7363          */
7364         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7365
7366         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7367             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7368                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7369                 rc = -ENODEV;
7370                 goto err_out_unmap;
7371         }
7372
7373         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7374         for (i = 0, j = 0; i < 3; i++) {
7375                 u8 num, k, skip0;
7376
7377                 num = (u8) (reg >> (24 - (i * 8)));
7378                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7379                         if (num >= k || !skip0 || k == 1) {
7380                                 bp->fw_version[j++] = (num / k) + '0';
7381                                 skip0 = 0;
7382                         }
7383                 }
7384                 if (i != 2)
7385                         bp->fw_version[j++] = '.';
7386         }
7387         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7388         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7389                 bp->wol = 1;
7390
7391         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7392                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7393
7394                 for (i = 0; i < 30; i++) {
7395                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7396                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7397                                 break;
7398                         msleep(10);
7399                 }
7400         }
7401         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7402         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7403         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7404             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7405                 int i;
7406                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7407
7408                 bp->fw_version[j++] = ' ';
7409                 for (i = 0; i < 3; i++) {
7410                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7411                         reg = swab32(reg);
7412                         memcpy(&bp->fw_version[j], &reg, 4);
7413                         j += 4;
7414                 }
7415         }
7416
7417         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7418         bp->mac_addr[0] = (u8) (reg >> 8);
7419         bp->mac_addr[1] = (u8) reg;
7420
7421         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7422         bp->mac_addr[2] = (u8) (reg >> 24);
7423         bp->mac_addr[3] = (u8) (reg >> 16);
7424         bp->mac_addr[4] = (u8) (reg >> 8);
7425         bp->mac_addr[5] = (u8) reg;
7426
7427         bp->tx_ring_size = MAX_TX_DESC_CNT;
7428         bnx2_set_rx_ring_size(bp, 255);
7429
7430         bp->rx_csum = 1;
7431
7432         bp->tx_quick_cons_trip_int = 20;
7433         bp->tx_quick_cons_trip = 20;
7434         bp->tx_ticks_int = 80;
7435         bp->tx_ticks = 80;
7436
7437         bp->rx_quick_cons_trip_int = 6;
7438         bp->rx_quick_cons_trip = 6;
7439         bp->rx_ticks_int = 18;
7440         bp->rx_ticks = 18;
7441
7442         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7443
7444         bp->timer_interval =  HZ;
7445         bp->current_interval =  HZ;
7446
7447         bp->phy_addr = 1;
7448
7449         /* Disable WOL support if we are running on a SERDES chip. */
7450         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7451                 bnx2_get_5709_media(bp);
7452         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7453                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7454
7455         bp->phy_port = PORT_TP;
7456         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7457                 bp->phy_port = PORT_FIBRE;
7458                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7459                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7460                         bp->flags |= BNX2_FLAG_NO_WOL;
7461                         bp->wol = 0;
7462                 }
7463                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7464                         /* Don't do parallel detect on this board because of
7465                          * some board problems.  The link will not go down
7466                          * if we do parallel detect.
7467                          */
7468                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7469                             pdev->subsystem_device == 0x310c)
7470                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7471                 } else {
7472                         bp->phy_addr = 2;
7473                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7474                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7475                 }
7476                 bnx2_init_remote_phy(bp);
7477
7478         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7479                    CHIP_NUM(bp) == CHIP_NUM_5708)
7480                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7481         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7482                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7483                   CHIP_REV(bp) == CHIP_REV_Bx))
7484                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7485
7486         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7487             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7488             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7489                 bp->flags |= BNX2_FLAG_NO_WOL;
7490                 bp->wol = 0;
7491         }
7492
7493         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7494                 bp->tx_quick_cons_trip_int =
7495                         bp->tx_quick_cons_trip;
7496                 bp->tx_ticks_int = bp->tx_ticks;
7497                 bp->rx_quick_cons_trip_int =
7498                         bp->rx_quick_cons_trip;
7499                 bp->rx_ticks_int = bp->rx_ticks;
7500                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7501                 bp->com_ticks_int = bp->com_ticks;
7502                 bp->cmd_ticks_int = bp->cmd_ticks;
7503         }
7504
7505         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7506          *
7507          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7508          * with byte enables disabled on the unused 32-bit word.  This is legal
7509          * but causes problems on the AMD 8132 which will eventually stop
7510          * responding after a while.
7511          *
7512          * AMD believes this incompatibility is unique to the 5706, and
7513          * prefers to locally disable MSI rather than globally disabling it.
7514          */
7515         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7516                 struct pci_dev *amd_8132 = NULL;
7517
7518                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7519                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7520                                                   amd_8132))) {
7521
7522                         if (amd_8132->revision >= 0x10 &&
7523                             amd_8132->revision <= 0x13) {
7524                                 disable_msi = 1;
7525                                 pci_dev_put(amd_8132);
7526                                 break;
7527                         }
7528                 }
7529         }
7530
7531         bnx2_set_default_link(bp);
7532         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7533
7534         init_timer(&bp->timer);
7535         bp->timer.expires = RUN_AT(bp->timer_interval);
7536         bp->timer.data = (unsigned long) bp;
7537         bp->timer.function = bnx2_timer;
7538
7539         return 0;
7540
7541 err_out_unmap:
7542         if (bp->regview) {
7543                 iounmap(bp->regview);
7544                 bp->regview = NULL;
7545         }
7546
7547 err_out_release:
7548         pci_release_regions(pdev);
7549
7550 err_out_disable:
7551         pci_disable_device(pdev);
7552         pci_set_drvdata(pdev, NULL);
7553
7554 err_out:
7555         return rc;
7556 }
7557
7558 static char * __devinit
7559 bnx2_bus_string(struct bnx2 *bp, char *str)
7560 {
7561         char *s = str;
7562
7563         if (bp->flags & BNX2_FLAG_PCIE) {
7564                 s += sprintf(s, "PCI Express");
7565         } else {
7566                 s += sprintf(s, "PCI");
7567                 if (bp->flags & BNX2_FLAG_PCIX)
7568                         s += sprintf(s, "-X");
7569                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7570                         s += sprintf(s, " 32-bit");
7571                 else
7572                         s += sprintf(s, " 64-bit");
7573                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7574         }
7575         return str;
7576 }
7577
7578 static void __devinit
7579 bnx2_init_napi(struct bnx2 *bp)
7580 {
7581         int i;
7582
7583         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7584                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7585                 int (*poll)(struct napi_struct *, int);
7586
7587                 if (i == 0)
7588                         poll = bnx2_poll;
7589                 else
7590                         poll = bnx2_poll_msix;
7591
7592                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
7593                 bnapi->bp = bp;
7594         }
7595 }
7596
/* PCI probe entry point.  Allocates the net_device, initializes the
 * board via bnx2_init_board(), wires up the net_device method pointers
 * and offload feature flags, and registers the interface with the
 * networking core.  Returns 0 on success or a negative errno; on
 * failure, everything acquired so far is released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first device probed. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* bp->mac_addr is expected to have been filled in during
	 * bnx2_init_board() (not visible in this file section).
	 */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	/* IP checksum offload and scatter/gather on all chips; the 5709
	 * additionally supports IPv6 checksum and TSO6.
	 */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Undo everything bnx2_init_board() set up. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7684
/* PCI remove hook: tear down what bnx2_init_one() / bnx2_init_board()
 * created, in roughly reverse order of acquisition.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any driver work items (e.g. a queued reset task —
	 * scheduled elsewhere in this file) to finish before unregistering.
	 */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7703
/* Power-management suspend hook: save PCI config state, quiesce the
 * NIC if it is running, tell the firmware why we are going down (WOL
 * vs. no-WOL), and enter the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code matching the WOL configuration. */
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7734
/* Power-management resume hook: restore PCI config state and, if the
 * interface was up at suspend time, bring the chip back to D0 and
 * re-initialize it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7751
7752 /**
7753  * bnx2_io_error_detected - called when PCI error is detected
7754  * @pdev: Pointer to PCI device
7755  * @state: The current pci connection state
7756  *
7757  * This function is called after a PCI bus error affecting
7758  * this device has been detected.
7759  */
7760 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7761                                                pci_channel_state_t state)
7762 {
7763         struct net_device *dev = pci_get_drvdata(pdev);
7764         struct bnx2 *bp = netdev_priv(dev);
7765
7766         rtnl_lock();
7767         netif_device_detach(dev);
7768
7769         if (netif_running(dev)) {
7770                 bnx2_netif_stop(bp);
7771                 del_timer_sync(&bp->timer);
7772                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7773         }
7774
7775         pci_disable_device(pdev);
7776         rtnl_unlock();
7777
7778         /* Request a slot slot reset. */
7779         return PCI_ERS_RESULT_NEED_RESET;
7780 }
7781
7782 /**
7783  * bnx2_io_slot_reset - called after the pci bus has been reset.
7784  * @pdev: Pointer to PCI device
7785  *
7786  * Restart the card from scratch, as if from a cold-boot.
7787  */
7788 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7789 {
7790         struct net_device *dev = pci_get_drvdata(pdev);
7791         struct bnx2 *bp = netdev_priv(dev);
7792
7793         rtnl_lock();
7794         if (pci_enable_device(pdev)) {
7795                 dev_err(&pdev->dev,
7796                         "Cannot re-enable PCI device after reset.\n");
7797                 rtnl_unlock();
7798                 return PCI_ERS_RESULT_DISCONNECT;
7799         }
7800         pci_set_master(pdev);
7801         pci_restore_state(pdev);
7802
7803         if (netif_running(dev)) {
7804                 bnx2_set_power_state(bp, PCI_D0);
7805                 bnx2_init_nic(bp, 1);
7806         }
7807
7808         rtnl_unlock();
7809         return PCI_ERS_RESULT_RECOVERED;
7810 }
7811
7812 /**
7813  * bnx2_io_resume - called when traffic can start flowing again.
7814  * @pdev: Pointer to PCI device
7815  *
7816  * This callback is called when the error recovery driver tells us that
7817  * its OK to resume normal operation.
7818  */
7819 static void bnx2_io_resume(struct pci_dev *pdev)
7820 {
7821         struct net_device *dev = pci_get_drvdata(pdev);
7822         struct bnx2 *bp = netdev_priv(dev);
7823
7824         rtnl_lock();
7825         if (netif_running(dev))
7826                 bnx2_netif_start(bp);
7827
7828         netif_device_attach(dev);
7829         rtnl_unlock();
7830 }
7831
/* PCI error-recovery (AER) callbacks for this driver. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
7837
/* PCI driver descriptor: probe/remove, power management, and error
 * recovery entry points registered with the PCI core.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7847
7848 static int __init bnx2_init(void)
7849 {
7850         return pci_register_driver(&bnx2_pci_driver);
7851 }
7852
/* Module unload: detach the driver from the PCI core. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7857
/* Register the module entry and exit points with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7860
7861
7862