/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.0"
60 #define DRV_MODULE_RELDATE "December 11, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
};
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
	.name = "5709 Buffered flash (256kB)",
};
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
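/* Worked example (hypothetical values): with the default 255-entry ring,
 * TX_DESC_CNT is 256.  If tx_prod has wrapped to 5 while tx_cons is still
 * 65531, the 16-bit difference masks down to 10, leaving 245 free
 * descriptors.  When prod and cons are exactly TX_DESC_CNT apart the ring
 * is completely full, and diff is clamped to MAX_TX_DESC_CNT (255)
 * because one of the 256 indices is always skipped.
 */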
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
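/* The REG_RD_IND()/REG_WR_IND() macros used later in this file presumably
 * resolve to the two helpers above (the mapping lives in bnx2.h).
 * indirect_lock serializes the window-address/window-data register pair so
 * concurrent callers cannot interleave their accesses.
 */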
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
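/* The enable sequence above is deliberately two writes: the first acks the
 * last seen status index with the mask bit still set, the second clears
 * the mask.  The trailing COAL_NOW kick asks the host coalescing block to
 * generate an interrupt immediately, so an event that arrived while the
 * line was masked is not left unserviced.
 */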
421 bnx2_disable_int_sync(struct bnx2 *bp)
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
429 bnx2_netif_stop(struct bnx2 *bp)
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
433 napi_disable(&bp->napi);
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
440 bnx2_netif_start(struct bnx2 *bp)
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
445 napi_enable(&bp->napi);
452 bnx2_free_mem(struct bnx2 *bp)
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
464 if (bp->status_blk) {
465 pci_free_consistent(bp->pdev, bp->status_stats_size,
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
468 bp->stats_blk = NULL;
470 if (bp->tx_desc_ring) {
471 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
472 bp->tx_desc_ring, bp->tx_desc_mapping);
473 bp->tx_desc_ring = NULL;
475 kfree(bp->tx_buf_ring);
476 bp->tx_buf_ring = NULL;
477 for (i = 0; i < bp->rx_max_ring; i++) {
478 if (bp->rx_desc_ring[i])
479 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
481 bp->rx_desc_mapping[i]);
482 bp->rx_desc_ring[i] = NULL;
484 vfree(bp->rx_buf_ring);
485 bp->rx_buf_ring = NULL;
486 for (i = 0; i < bp->rx_max_pg_ring; i++) {
487 if (bp->rx_pg_desc_ring[i])
488 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
489 bp->rx_pg_desc_ring[i],
490 bp->rx_pg_desc_mapping[i]);
491 bp->rx_pg_desc_ring[i] = NULL;
494 vfree(bp->rx_pg_ring);
495 bp->rx_pg_ring = NULL;
499 bnx2_alloc_mem(struct bnx2 *bp)
501 int i, status_blk_size;
503 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
504 if (bp->tx_buf_ring == NULL)
507 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
508 &bp->tx_desc_mapping);
509 if (bp->tx_desc_ring == NULL)
512 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
513 if (bp->rx_buf_ring == NULL)
516 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
518 for (i = 0; i < bp->rx_max_ring; i++) {
519 bp->rx_desc_ring[i] =
520 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
521 &bp->rx_desc_mapping[i]);
522 if (bp->rx_desc_ring[i] == NULL)
527 if (bp->rx_pg_ring_size) {
528 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
530 if (bp->rx_pg_ring == NULL)
533 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
537 for (i = 0; i < bp->rx_max_pg_ring; i++) {
538 bp->rx_pg_desc_ring[i] =
539 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
540 &bp->rx_pg_desc_mapping[i]);
541 if (bp->rx_pg_desc_ring[i] == NULL)
546 /* Combine status and statistics blocks into one allocation. */
547 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
548 bp->status_stats_size = status_blk_size +
549 sizeof(struct statistics_block);
551 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
552 &bp->status_blk_mapping);
553 if (bp->status_blk == NULL)
556 memset(bp->status_blk, 0, bp->status_stats_size);
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
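	/* Resulting layout of the combined DMA allocation:
	 *   [0, status_blk_size)                  status block
	 *   [status_blk_size, status_stats_size)  statistics block
	 * with status_blk_size already L1-cache aligned above.
	 */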
563 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
564 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
565 if (bp->ctx_pages == 0)
567 for (i = 0; i < bp->ctx_pages; i++) {
568 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
570 &bp->ctx_blk_mapping[i]);
571 if (bp->ctx_blk[i] == NULL)
583 bnx2_report_fw_link(struct bnx2 *bp)
585 u32 fw_link_status = 0;
587 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
593 switch (bp->line_speed) {
595 if (bp->duplex == DUPLEX_HALF)
596 fw_link_status = BNX2_LINK_STATUS_10HALF;
598 fw_link_status = BNX2_LINK_STATUS_10FULL;
601 if (bp->duplex == DUPLEX_HALF)
602 fw_link_status = BNX2_LINK_STATUS_100HALF;
604 fw_link_status = BNX2_LINK_STATUS_100FULL;
607 if (bp->duplex == DUPLEX_HALF)
608 fw_link_status = BNX2_LINK_STATUS_1000HALF;
610 fw_link_status = BNX2_LINK_STATUS_1000FULL;
613 if (bp->duplex == DUPLEX_HALF)
614 fw_link_status = BNX2_LINK_STATUS_2500HALF;
616 fw_link_status = BNX2_LINK_STATUS_2500FULL;
620 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
623 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
625 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
626 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
628 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
629 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
630 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
632 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
636 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
638 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
		 "Copper"));
}
650 bnx2_report_link(struct bnx2 *bp)
653 netif_carrier_on(bp->dev);
654 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
655 bnx2_xceiver_str(bp));
657 printk("%d Mbps ", bp->line_speed);
659 if (bp->duplex == DUPLEX_FULL)
660 printk("full duplex");
662 printk("half duplex");
665 if (bp->flow_ctrl & FLOW_CTRL_RX) {
666 printk(", receive ");
667 if (bp->flow_ctrl & FLOW_CTRL_TX)
668 printk("& transmit ");
671 printk(", transmit ");
673 printk("flow control ON");
678 netif_carrier_off(bp->dev);
679 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
680 bnx2_xceiver_str(bp));
683 bnx2_report_fw_link(bp);
687 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
689 u32 local_adv, remote_adv;
692 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
693 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
695 if (bp->duplex == DUPLEX_FULL) {
696 bp->flow_ctrl = bp->req_flow_ctrl;
701 if (bp->duplex != DUPLEX_FULL) {
705 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
706 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
709 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
710 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
711 bp->flow_ctrl |= FLOW_CTRL_TX;
712 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
713 bp->flow_ctrl |= FLOW_CTRL_RX;
717 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
718 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
720 if (bp->phy_flags & PHY_SERDES_FLAG) {
721 u32 new_local_adv = 0;
722 u32 new_remote_adv = 0;
724 if (local_adv & ADVERTISE_1000XPAUSE)
725 new_local_adv |= ADVERTISE_PAUSE_CAP;
726 if (local_adv & ADVERTISE_1000XPSE_ASYM)
727 new_local_adv |= ADVERTISE_PAUSE_ASYM;
728 if (remote_adv & ADVERTISE_1000XPAUSE)
729 new_remote_adv |= ADVERTISE_PAUSE_CAP;
730 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
731 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
733 local_adv = new_local_adv;
734 remote_adv = new_remote_adv;
	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP)
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (remote_adv & ADVERTISE_PAUSE_ASYM)
				bp->flow_ctrl = FLOW_CTRL_RX;
		} else {
			if (remote_adv & ADVERTISE_PAUSE_CAP)
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM))
			bp->flow_ctrl = FLOW_CTRL_TX;
	}
}
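/* Resolution summary, matching Table 28B-3 of 802.3ab-1999
 * (local PAUSE/ASM_DIR vs. partner PAUSE/ASM_DIR):
 *   1/x + 1/x  ->  FLOW_CTRL_TX | FLOW_CTRL_RX (symmetric)
 *   1/1 + 0/1  ->  FLOW_CTRL_RX
 *   0/1 + 1/1  ->  FLOW_CTRL_TX
 * All other combinations leave flow control off.
 */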
763 bnx2_5709s_linkup(struct bnx2 *bp)
769 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
770 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
771 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
773 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
774 bp->line_speed = bp->req_line_speed;
775 bp->duplex = bp->req_duplex;
778 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
780 case MII_BNX2_GP_TOP_AN_SPEED_10:
781 bp->line_speed = SPEED_10;
783 case MII_BNX2_GP_TOP_AN_SPEED_100:
784 bp->line_speed = SPEED_100;
786 case MII_BNX2_GP_TOP_AN_SPEED_1G:
787 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
788 bp->line_speed = SPEED_1000;
790 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
791 bp->line_speed = SPEED_2500;
794 if (val & MII_BNX2_GP_TOP_AN_FD)
795 bp->duplex = DUPLEX_FULL;
797 bp->duplex = DUPLEX_HALF;
802 bnx2_5708s_linkup(struct bnx2 *bp)
807 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
808 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
809 case BCM5708S_1000X_STAT1_SPEED_10:
810 bp->line_speed = SPEED_10;
812 case BCM5708S_1000X_STAT1_SPEED_100:
813 bp->line_speed = SPEED_100;
815 case BCM5708S_1000X_STAT1_SPEED_1G:
816 bp->line_speed = SPEED_1000;
818 case BCM5708S_1000X_STAT1_SPEED_2G5:
819 bp->line_speed = SPEED_2500;
822 if (val & BCM5708S_1000X_STAT1_FD)
823 bp->duplex = DUPLEX_FULL;
825 bp->duplex = DUPLEX_HALF;
831 bnx2_5706s_linkup(struct bnx2 *bp)
833 u32 bmcr, local_adv, remote_adv, common;
836 bp->line_speed = SPEED_1000;
838 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839 if (bmcr & BMCR_FULLDPLX) {
840 bp->duplex = DUPLEX_FULL;
843 bp->duplex = DUPLEX_HALF;
846 if (!(bmcr & BMCR_ANENABLE)) {
850 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
851 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
853 common = local_adv & remote_adv;
854 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
856 if (common & ADVERTISE_1000XFULL) {
857 bp->duplex = DUPLEX_FULL;
860 bp->duplex = DUPLEX_HALF;
868 bnx2_copper_linkup(struct bnx2 *bp)
872 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
873 if (bmcr & BMCR_ANENABLE) {
874 u32 local_adv, remote_adv, common;
876 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
877 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
879 common = local_adv & (remote_adv >> 2);
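		/* MII_STAT1000 reports the partner's 1000BASE-T abilities two
		 * bit positions above where MII_CTRL1000 advertises ours
		 * (LPA_1000FULL is bit 11, ADVERTISE_1000FULL is bit 9), so
		 * shifting the partner word right by 2 lines the masks up.
		 */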
880 if (common & ADVERTISE_1000FULL) {
881 bp->line_speed = SPEED_1000;
882 bp->duplex = DUPLEX_FULL;
884 else if (common & ADVERTISE_1000HALF) {
885 bp->line_speed = SPEED_1000;
886 bp->duplex = DUPLEX_HALF;
889 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
890 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
892 common = local_adv & remote_adv;
893 if (common & ADVERTISE_100FULL) {
894 bp->line_speed = SPEED_100;
895 bp->duplex = DUPLEX_FULL;
897 else if (common & ADVERTISE_100HALF) {
898 bp->line_speed = SPEED_100;
899 bp->duplex = DUPLEX_HALF;
901 else if (common & ADVERTISE_10FULL) {
902 bp->line_speed = SPEED_10;
903 bp->duplex = DUPLEX_FULL;
905 else if (common & ADVERTISE_10HALF) {
906 bp->line_speed = SPEED_10;
907 bp->duplex = DUPLEX_HALF;
916 if (bmcr & BMCR_SPEED100) {
917 bp->line_speed = SPEED_100;
920 bp->line_speed = SPEED_10;
922 if (bmcr & BMCR_FULLDPLX) {
923 bp->duplex = DUPLEX_FULL;
926 bp->duplex = DUPLEX_HALF;
934 bnx2_set_mac_link(struct bnx2 *bp)
938 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
939 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
940 (bp->duplex == DUPLEX_HALF)) {
941 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
944 /* Configure the EMAC mode register. */
945 val = REG_RD(bp, BNX2_EMAC_MODE);
947 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
948 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
949 BNX2_EMAC_MODE_25G_MODE);
952 switch (bp->line_speed) {
954 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
955 val |= BNX2_EMAC_MODE_PORT_MII_10M;
960 val |= BNX2_EMAC_MODE_PORT_MII;
963 val |= BNX2_EMAC_MODE_25G_MODE;
966 val |= BNX2_EMAC_MODE_PORT_GMII;
971 val |= BNX2_EMAC_MODE_PORT_GMII;
974 /* Set the MAC to operate in the appropriate duplex mode. */
975 if (bp->duplex == DUPLEX_HALF)
976 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
977 REG_WR(bp, BNX2_EMAC_MODE, val);
979 /* Enable/disable rx PAUSE. */
980 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
982 if (bp->flow_ctrl & FLOW_CTRL_RX)
983 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
984 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
986 /* Enable/disable tx PAUSE. */
987 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
988 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
990 if (bp->flow_ctrl & FLOW_CTRL_TX)
991 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
992 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
994 /* Acknowledge the interrupt. */
995 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1001 bnx2_enable_bmsr1(struct bnx2 *bp)
1003 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1004 (CHIP_NUM(bp) == CHIP_NUM_5709))
1005 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1006 MII_BNX2_BLK_ADDR_GP_STATUS);
1010 bnx2_disable_bmsr1(struct bnx2 *bp)
1012 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1013 (CHIP_NUM(bp) == CHIP_NUM_5709))
1014 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1015 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1019 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1024 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1027 if (bp->autoneg & AUTONEG_SPEED)
1028 bp->advertising |= ADVERTISED_2500baseX_Full;
1030 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1031 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1033 bnx2_read_phy(bp, bp->mii_up1, &up1);
1034 if (!(up1 & BCM5708S_UP1_2G5)) {
1035 up1 |= BCM5708S_UP1_2G5;
1036 bnx2_write_phy(bp, bp->mii_up1, up1);
1040 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1041 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1042 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1048 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1053 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1056 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1059 bnx2_read_phy(bp, bp->mii_up1, &up1);
1060 if (up1 & BCM5708S_UP1_2G5) {
1061 up1 &= ~BCM5708S_UP1_2G5;
1062 bnx2_write_phy(bp, bp->mii_up1, up1);
1066 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1067 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1068 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1074 bnx2_enable_forced_2g5(struct bnx2 *bp)
1078 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1081 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1084 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085 MII_BNX2_BLK_ADDR_SERDES_DIG);
1086 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1088 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr |= BCM5708S_BMCR_FORCE_2500;
1100 if (bp->autoneg & AUTONEG_SPEED) {
1101 bmcr &= ~BMCR_ANENABLE;
1102 if (bp->req_duplex == DUPLEX_FULL)
1103 bmcr |= BMCR_FULLDPLX;
1105 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1109 bnx2_disable_forced_2g5(struct bnx2 *bp)
1113 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1116 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1119 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1120 MII_BNX2_BLK_ADDR_SERDES_DIG);
1121 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1122 val &= ~MII_BNX2_SD_MISC1_FORCE;
1123 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1125 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1126 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1127 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1129 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1130 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1131 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1134 if (bp->autoneg & AUTONEG_SPEED)
1135 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1136 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1140 bnx2_set_link(struct bnx2 *bp)
1145 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1150 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1153 link_up = bp->link_up;
1155 bnx2_enable_bmsr1(bp);
1156 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1157 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1158 bnx2_disable_bmsr1(bp);
1160 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1161 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1164 val = REG_RD(bp, BNX2_EMAC_STATUS);
1165 if (val & BNX2_EMAC_STATUS_LINK)
1166 bmsr |= BMSR_LSTATUS;
1168 bmsr &= ~BMSR_LSTATUS;
1171 if (bmsr & BMSR_LSTATUS) {
1174 if (bp->phy_flags & PHY_SERDES_FLAG) {
1175 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1176 bnx2_5706s_linkup(bp);
1177 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1178 bnx2_5708s_linkup(bp);
1179 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1180 bnx2_5709s_linkup(bp);
1183 bnx2_copper_linkup(bp);
1185 bnx2_resolve_flow_ctrl(bp);
1188 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1189 (bp->autoneg & AUTONEG_SPEED))
1190 bnx2_disable_forced_2g5(bp);
1192 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1196 if (bp->link_up != link_up) {
1197 bnx2_report_link(bp);
1200 bnx2_set_mac_link(bp);
1206 bnx2_reset_phy(struct bnx2 *bp)
1211 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1213 #define PHY_RESET_MAX_WAIT 100
1214 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1218 if (!(reg & BMCR_RESET)) {
1223 if (i == PHY_RESET_MAX_WAIT) {
1230 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1234 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1235 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1237 if (bp->phy_flags & PHY_SERDES_FLAG) {
1238 adv = ADVERTISE_1000XPAUSE;
1241 adv = ADVERTISE_PAUSE_CAP;
1244 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1245 if (bp->phy_flags & PHY_SERDES_FLAG) {
1246 adv = ADVERTISE_1000XPSE_ASYM;
1249 adv = ADVERTISE_PAUSE_ASYM;
1252 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1253 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1257 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1263 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1266 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1268 u32 speed_arg = 0, pause_adv;
1270 pause_adv = bnx2_phy_get_pause_adv(bp);
1272 if (bp->autoneg & AUTONEG_SPEED) {
1273 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1274 if (bp->advertising & ADVERTISED_10baseT_Half)
1275 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1276 if (bp->advertising & ADVERTISED_10baseT_Full)
1277 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278 if (bp->advertising & ADVERTISED_100baseT_Half)
1279 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1280 if (bp->advertising & ADVERTISED_100baseT_Full)
1281 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1282 if (bp->advertising & ADVERTISED_1000baseT_Full)
1283 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1284 if (bp->advertising & ADVERTISED_2500baseX_Full)
1285 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1287 if (bp->req_line_speed == SPEED_2500)
1288 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1289 else if (bp->req_line_speed == SPEED_1000)
1290 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1291 else if (bp->req_line_speed == SPEED_100) {
1292 if (bp->req_duplex == DUPLEX_FULL)
1293 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1295 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296 } else if (bp->req_line_speed == SPEED_10) {
1297 if (bp->req_duplex == DUPLEX_FULL)
1298 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1300 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1304 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1305 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1307 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1309 if (port == PORT_TP)
1310 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1311 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1313 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1315 spin_unlock_bh(&bp->phy_lock);
1316 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1317 spin_lock_bh(&bp->phy_lock);
1323 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1328 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1329 return (bnx2_setup_remote_phy(bp, port));
1331 if (!(bp->autoneg & AUTONEG_SPEED)) {
1333 int force_link_down = 0;
1335 if (bp->req_line_speed == SPEED_2500) {
1336 if (!bnx2_test_and_enable_2g5(bp))
1337 force_link_down = 1;
1338 } else if (bp->req_line_speed == SPEED_1000) {
1339 if (bnx2_test_and_disable_2g5(bp))
1340 force_link_down = 1;
1342 bnx2_read_phy(bp, bp->mii_adv, &adv);
1343 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1345 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1346 new_bmcr = bmcr & ~BMCR_ANENABLE;
1347 new_bmcr |= BMCR_SPEED1000;
1349 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1350 if (bp->req_line_speed == SPEED_2500)
1351 bnx2_enable_forced_2g5(bp);
1352 else if (bp->req_line_speed == SPEED_1000) {
1353 bnx2_disable_forced_2g5(bp);
1354 new_bmcr &= ~0x2000;
1357 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1358 if (bp->req_line_speed == SPEED_2500)
1359 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1361 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1364 if (bp->req_duplex == DUPLEX_FULL) {
1365 adv |= ADVERTISE_1000XFULL;
1366 new_bmcr |= BMCR_FULLDPLX;
1369 adv |= ADVERTISE_1000XHALF;
1370 new_bmcr &= ~BMCR_FULLDPLX;
1372 if ((new_bmcr != bmcr) || (force_link_down)) {
1373 /* Force a link down visible on the other side */
1375 bnx2_write_phy(bp, bp->mii_adv, adv &
1376 ~(ADVERTISE_1000XFULL |
1377 ADVERTISE_1000XHALF));
1378 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1379 BMCR_ANRESTART | BMCR_ANENABLE);
1382 netif_carrier_off(bp->dev);
1383 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1384 bnx2_report_link(bp);
1386 bnx2_write_phy(bp, bp->mii_adv, adv);
1387 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1389 bnx2_resolve_flow_ctrl(bp);
1390 bnx2_set_mac_link(bp);
1395 bnx2_test_and_enable_2g5(bp);
1397 if (bp->advertising & ADVERTISED_1000baseT_Full)
1398 new_adv |= ADVERTISE_1000XFULL;
1400 new_adv |= bnx2_phy_get_pause_adv(bp);
1402 bnx2_read_phy(bp, bp->mii_adv, &adv);
1403 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1405 bp->serdes_an_pending = 0;
1406 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1407 /* Force a link down visible on the other side */
1409 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1410 spin_unlock_bh(&bp->phy_lock);
1412 spin_lock_bh(&bp->phy_lock);
1415 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1416 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers.  Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions.  Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
1426 bp->current_interval = SERDES_AN_TIMEOUT;
1427 bp->serdes_an_pending = 1;
1428 mod_timer(&bp->timer, jiffies + bp->current_interval);
1430 bnx2_resolve_flow_ctrl(bp);
1431 bnx2_set_mac_link(bp);
1437 #define ETHTOOL_ALL_FIBRE_SPEED \
1438 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1439 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1440 (ADVERTISED_1000baseT_Full)
1442 #define ETHTOOL_ALL_COPPER_SPEED \
1443 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1444 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1445 ADVERTISED_1000baseT_Full)
1447 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1448 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1450 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1453 bnx2_set_default_remote_link(struct bnx2 *bp)
1457 if (bp->phy_port == PORT_TP)
1458 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1460 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1462 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1463 bp->req_line_speed = 0;
1464 bp->autoneg |= AUTONEG_SPEED;
1465 bp->advertising = ADVERTISED_Autoneg;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1467 bp->advertising |= ADVERTISED_10baseT_Half;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1469 bp->advertising |= ADVERTISED_10baseT_Full;
1470 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1471 bp->advertising |= ADVERTISED_100baseT_Half;
1472 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1473 bp->advertising |= ADVERTISED_100baseT_Full;
1474 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1475 bp->advertising |= ADVERTISED_1000baseT_Full;
1476 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1477 bp->advertising |= ADVERTISED_2500baseX_Full;
1480 bp->advertising = 0;
1481 bp->req_duplex = DUPLEX_FULL;
1482 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1483 bp->req_line_speed = SPEED_10;
1484 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1485 bp->req_duplex = DUPLEX_HALF;
1487 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1488 bp->req_line_speed = SPEED_100;
1489 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1490 bp->req_duplex = DUPLEX_HALF;
1492 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1493 bp->req_line_speed = SPEED_1000;
1494 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1495 bp->req_line_speed = SPEED_2500;
1500 bnx2_set_default_link(struct bnx2 *bp)
1502 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1503 return bnx2_set_default_remote_link(bp);
1505 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1506 bp->req_line_speed = 0;
1507 if (bp->phy_flags & PHY_SERDES_FLAG) {
1510 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1512 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1513 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1514 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1516 bp->req_line_speed = bp->line_speed = SPEED_1000;
1517 bp->req_duplex = DUPLEX_FULL;
1520 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1524 bnx2_send_heart_beat(struct bnx2 *bp)
1529 spin_lock(&bp->indirect_lock);
1530 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1531 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1532 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1533 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1534 spin_unlock(&bp->indirect_lock);
1538 bnx2_remote_phy_event(struct bnx2 *bp)
1541 u8 link_up = bp->link_up;
1544 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1546 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1547 bnx2_send_heart_beat(bp);
1549 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1551 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
	speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
	bp->duplex = DUPLEX_FULL;
	switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
	}
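	/* Note the deliberate fall-through above: each _HALF case only
	 * overrides the full-duplex default and then falls into the matching
	 * _FULL case, which sets the line speed for both variants.
	 */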
1586 spin_lock(&bp->phy_lock);
1588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1590 if (bp->duplex == DUPLEX_FULL)
1591 bp->flow_ctrl = bp->req_flow_ctrl;
1593 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1594 bp->flow_ctrl |= FLOW_CTRL_TX;
1595 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1596 bp->flow_ctrl |= FLOW_CTRL_RX;
1599 old_port = bp->phy_port;
1600 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1601 bp->phy_port = PORT_FIBRE;
1603 bp->phy_port = PORT_TP;
1605 if (old_port != bp->phy_port)
1606 bnx2_set_default_link(bp);
1608 spin_unlock(&bp->phy_lock);
1610 if (bp->link_up != link_up)
1611 bnx2_report_link(bp);
1613 bnx2_set_mac_link(bp);
1617 bnx2_set_remote_link(struct bnx2 *bp)
1621 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1623 case BNX2_FW_EVT_CODE_LINK_EVENT:
1624 bnx2_remote_phy_event(bp);
1626 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1628 bnx2_send_heart_beat(bp);
1635 bnx2_setup_copper_phy(struct bnx2 *bp)
1640 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1642 if (bp->autoneg & AUTONEG_SPEED) {
1643 u32 adv_reg, adv1000_reg;
1644 u32 new_adv_reg = 0;
1645 u32 new_adv1000_reg = 0;
1647 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1648 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1649 ADVERTISE_PAUSE_ASYM);
1651 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1652 adv1000_reg &= PHY_ALL_1000_SPEED;
1654 if (bp->advertising & ADVERTISED_10baseT_Half)
1655 new_adv_reg |= ADVERTISE_10HALF;
1656 if (bp->advertising & ADVERTISED_10baseT_Full)
1657 new_adv_reg |= ADVERTISE_10FULL;
1658 if (bp->advertising & ADVERTISED_100baseT_Half)
1659 new_adv_reg |= ADVERTISE_100HALF;
1660 if (bp->advertising & ADVERTISED_100baseT_Full)
1661 new_adv_reg |= ADVERTISE_100FULL;
1662 if (bp->advertising & ADVERTISED_1000baseT_Full)
1663 new_adv1000_reg |= ADVERTISE_1000FULL;
1665 new_adv_reg |= ADVERTISE_CSMA;
1667 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1669 if ((adv1000_reg != new_adv1000_reg) ||
1670 (adv_reg != new_adv_reg) ||
1671 ((bmcr & BMCR_ANENABLE) == 0)) {
1673 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1674 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1675 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1678 else if (bp->link_up) {
1679 /* Flow ctrl may have changed from auto to forced */
1680 /* or vice-versa. */
1682 bnx2_resolve_flow_ctrl(bp);
1683 bnx2_set_mac_link(bp);
1689 if (bp->req_line_speed == SPEED_100) {
1690 new_bmcr |= BMCR_SPEED100;
1692 if (bp->req_duplex == DUPLEX_FULL) {
1693 new_bmcr |= BMCR_FULLDPLX;
1695 if (new_bmcr != bmcr) {
1698 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1699 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1701 if (bmsr & BMSR_LSTATUS) {
1702 /* Force link down */
1703 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1704 spin_unlock_bh(&bp->phy_lock);
1706 spin_lock_bh(&bp->phy_lock);
1708 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1709 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1712 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1714 /* Normally, the new speed is setup after the link has
1715 * gone down and up again. In some cases, link will not go
1716 * down so we need to set up the new speed here.
1718 if (bmsr & BMSR_LSTATUS) {
1719 bp->line_speed = bp->req_line_speed;
1720 bp->duplex = bp->req_duplex;
1721 bnx2_resolve_flow_ctrl(bp);
1722 bnx2_set_mac_link(bp);
1725 bnx2_resolve_flow_ctrl(bp);
1726 bnx2_set_mac_link(bp);
1732 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1734 if (bp->loopback == MAC_LOOPBACK)
1737 if (bp->phy_flags & PHY_SERDES_FLAG) {
1738 return (bnx2_setup_serdes_phy(bp, port));
1741 return (bnx2_setup_copper_phy(bp));
1746 bnx2_init_5709s_phy(struct bnx2 *bp)
1750 bp->mii_bmcr = MII_BMCR + 0x10;
1751 bp->mii_bmsr = MII_BMSR + 0x10;
1752 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1753 bp->mii_adv = MII_ADVERTISE + 0x10;
1754 bp->mii_lpa = MII_LPA + 0x10;
1755 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1758 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1760 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1763 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1765 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1766 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1767 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1768 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1770 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1771 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1772 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1773 val |= BCM5708S_UP1_2G5;
1775 val &= ~BCM5708S_UP1_2G5;
1776 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1778 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1779 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1780 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1781 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1783 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1785 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1786 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1787 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1789 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1795 bnx2_init_5708s_phy(struct bnx2 *bp)
1801 bp->mii_up1 = BCM5708S_UP1;
1803 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1804 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1805 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1807 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1808 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1809 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1811 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1812 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1813 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1815 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1816 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1817 val |= BCM5708S_UP1_2G5;
1818 bnx2_write_phy(bp, BCM5708S_UP1, val);
1821 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1822 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1823 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1824 /* increase tx signal amplitude */
1825 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1826 BCM5708S_BLK_ADDR_TX_MISC);
1827 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1828 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1829 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1830 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1833 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1834 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1839 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1840 BNX2_SHARED_HW_CFG_CONFIG);
1841 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1842 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1843 BCM5708S_BLK_ADDR_TX_MISC);
1844 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1845 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1846 BCM5708S_BLK_ADDR_DIG);
1853 bnx2_init_5706s_phy(struct bnx2 *bp)
1857 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1859 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1860 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1862 if (bp->dev->mtu > 1500) {
1865 /* Set extended packet length bit */
1866 bnx2_write_phy(bp, 0x18, 0x7);
1867 bnx2_read_phy(bp, 0x18, &val);
1868 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1870 bnx2_write_phy(bp, 0x1c, 0x6c00);
1871 bnx2_read_phy(bp, 0x1c, &val);
1872 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1877 bnx2_write_phy(bp, 0x18, 0x7);
1878 bnx2_read_phy(bp, 0x18, &val);
1879 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1881 bnx2_write_phy(bp, 0x1c, 0x6c00);
1882 bnx2_read_phy(bp, 0x1c, &val);
1883 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1890 bnx2_init_copper_phy(struct bnx2 *bp)
1896 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1897 bnx2_write_phy(bp, 0x18, 0x0c00);
1898 bnx2_write_phy(bp, 0x17, 0x000a);
1899 bnx2_write_phy(bp, 0x15, 0x310b);
1900 bnx2_write_phy(bp, 0x17, 0x201f);
1901 bnx2_write_phy(bp, 0x15, 0x9506);
1902 bnx2_write_phy(bp, 0x17, 0x401f);
1903 bnx2_write_phy(bp, 0x15, 0x14e2);
1904 bnx2_write_phy(bp, 0x18, 0x0400);
1907 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1908 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1909 MII_BNX2_DSP_EXPAND_REG | 0x8);
1910 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1912 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1915 if (bp->dev->mtu > 1500) {
1916 /* Set extended packet length bit */
1917 bnx2_write_phy(bp, 0x18, 0x7);
1918 bnx2_read_phy(bp, 0x18, &val);
1919 bnx2_write_phy(bp, 0x18, val | 0x4000);
1921 bnx2_read_phy(bp, 0x10, &val);
1922 bnx2_write_phy(bp, 0x10, val | 0x1);
1925 bnx2_write_phy(bp, 0x18, 0x7);
1926 bnx2_read_phy(bp, 0x18, &val);
1927 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1929 bnx2_read_phy(bp, 0x10, &val);
1930 bnx2_write_phy(bp, 0x10, val & ~0x1);
1933 /* ethernet@wirespeed */
1934 bnx2_write_phy(bp, 0x18, 0x7007);
1935 bnx2_read_phy(bp, 0x18, &val);
1936 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1942 bnx2_init_phy(struct bnx2 *bp)
1947 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1948 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1950 bp->mii_bmcr = MII_BMCR;
1951 bp->mii_bmsr = MII_BMSR;
1952 bp->mii_bmsr1 = MII_BMSR;
1953 bp->mii_adv = MII_ADVERTISE;
1954 bp->mii_lpa = MII_LPA;
1956 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1958 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1961 bnx2_read_phy(bp, MII_PHYSID1, &val);
1962 bp->phy_id = val << 16;
1963 bnx2_read_phy(bp, MII_PHYSID2, &val);
1964 bp->phy_id |= val & 0xffff;
1966 if (bp->phy_flags & PHY_SERDES_FLAG) {
1967 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1968 rc = bnx2_init_5706s_phy(bp);
1969 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1970 rc = bnx2_init_5708s_phy(bp);
1971 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1972 rc = bnx2_init_5709s_phy(bp);
1975 rc = bnx2_init_copper_phy(bp);
1980 rc = bnx2_setup_phy(bp, bp->phy_port);
1986 bnx2_set_mac_loopback(struct bnx2 *bp)
1990 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1992 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1993 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1998 static int bnx2_test_link(struct bnx2 *);
2001 bnx2_set_phy_loopback(struct bnx2 *bp)
2006 spin_lock_bh(&bp->phy_lock);
2007 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2009 spin_unlock_bh(&bp->phy_lock);
2013 for (i = 0; i < 10; i++) {
2014 if (bnx2_test_link(bp) == 0)
2019 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2020 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2021 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2022 BNX2_EMAC_MODE_25G_MODE);
2024 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2025 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
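/* Driver/bootcode mailbox handshake, as implemented in bnx2_fw_sync()
 * below: the driver posts a command word tagged with an incrementing
 * sequence number to BNX2_DRV_MB, then polls BNX2_FW_MB until the bootcode
 * echoes the sequence number back as an acknowledgement, giving up after
 * FW_ACK_TIME_OUT_MS and reporting the timeout to the firmware.
 */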
2031 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2037 msg_data |= bp->fw_wr_seq;
2039 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2041 /* wait for an acknowledgement. */
2042 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2045 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2047 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2050 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2053 /* If we timed out, inform the firmware that this is the case. */
2054 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2056 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2059 msg_data &= ~BNX2_DRV_MSG_CODE;
2060 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2062 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;
2074 bnx2_init_5709_context(struct bnx2 *bp)
2079 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2080 val |= (BCM_PAGE_BITS - 8) << 16;
2081 REG_WR(bp, BNX2_CTX_COMMAND, val);
2082 for (i = 0; i < 10; i++) {
2083 val = REG_RD(bp, BNX2_CTX_COMMAND);
2084 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2088 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2091 for (i = 0; i < bp->ctx_pages; i++) {
2094 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2095 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2096 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2097 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2098 (u64) bp->ctx_blk_mapping[i] >> 32);
2099 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2100 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2101 for (j = 0; j < 10; j++) {
2103 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2104 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2108 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2117 bnx2_init_context(struct bnx2 *bp)
2123 u32 vcid_addr, pcid_addr, offset;
2128 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2131 vcid_addr = GET_PCID_ADDR(vcid);
2133 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2138 pcid_addr = GET_PCID_ADDR(new_vcid);
2141 vcid_addr = GET_CID_ADDR(vcid);
2142 pcid_addr = vcid_addr;
2145 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2146 vcid_addr += (i << PHY_CTX_SHIFT);
2147 pcid_addr += (i << PHY_CTX_SHIFT);
2149 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2150 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2152 /* Zero out the context. */
2153 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2154 CTX_WR(bp, vcid_addr, offset, 0);
2160 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2166 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2167 if (good_mbuf == NULL) {
2168 printk(KERN_ERR PFX "Failed to allocate memory in "
2169 "bnx2_alloc_bad_rbuf\n");
2173 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2174 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2178 /* Allocate a bunch of mbufs and save the good ones in an array. */
2179 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2180 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2181 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2183 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2185 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2187 /* The addresses with Bit 9 set are bad memory blocks. */
2188 if (!(val & (1 << 9))) {
2189 good_mbuf[good_mbuf_cnt] = (u16) val;
2193 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2196 /* Free the good ones back to the mbuf pool thus discarding
2197 * all the bad ones. */
2198 while (good_mbuf_cnt) {
2201 val = good_mbuf[good_mbuf_cnt];
2202 val = (val << 9) | val | 1;
2204 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
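		/* A sketch of the assumed command encoding: the free register
		 * appears to expect the cluster index in two bit fields, so
		 * the index is replicated at bit 9 and bit 0 is set as the
		 * valid flag; hence (val << 9) | val | 1 above.
		 */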
2211 bnx2_set_mac_addr(struct bnx2 *bp)
2214 u8 *mac_addr = bp->dev->dev_addr;
2216 val = (mac_addr[0] << 8) | mac_addr[1];
2218 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2220 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2221 (mac_addr[4] << 8) | mac_addr[5];
2223 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2227 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2230 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2231 struct rx_bd *rxbd =
2232 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2233 struct page *page = alloc_page(GFP_ATOMIC);
2237 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2238 PCI_DMA_FROMDEVICE);
2240 pci_unmap_addr_set(rx_pg, mapping, mapping);
2241 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2242 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2247 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2249 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250 struct page *page = rx_pg->page;
2255 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256 PCI_DMA_FROMDEVICE);
2263 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2265 struct sk_buff *skb;
2266 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2268 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2269 unsigned long align;
2271 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2276 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2277 skb_reserve(skb, BNX2_RX_ALIGN - align);
2279 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2280 PCI_DMA_FROMDEVICE);
2283 pci_unmap_addr_set(rx_buf, mapping, mapping);
2285 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2286 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2288 bp->rx_prod_bseq += bp->rx_buf_use_size;
2294 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2296 struct status_block *sblk = bp->status_blk;
2297 u32 new_link_state, old_link_state;
2300 new_link_state = sblk->status_attn_bits & event;
2301 old_link_state = sblk->status_attn_bits_ack & event;
2302 if (new_link_state != old_link_state) {
2304 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2306 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2314 bnx2_phy_int(struct bnx2 *bp)
2316 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2317 spin_lock(&bp->phy_lock);
2319 spin_unlock(&bp->phy_lock);
2321 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2322 bnx2_set_remote_link(bp);
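/* The last entry of each BD ring page is a next-page pointer rather than a
 * real descriptor, so a hardware consumer index that lands on
 * MAX_TX_DESC_CNT must be advanced past that hole before it is compared
 * with the software index (hence the skip in bnx2_get_hw_tx_cons() below).
 */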
2327 bnx2_get_hw_tx_cons(struct bnx2 *bp)
2331 cons = bp->status_blk->status_tx_quick_consumer_index0;
2333 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2339 bnx2_tx_int(struct bnx2 *bp)
2341 u16 hw_cons, sw_cons, sw_ring_cons;
2344 hw_cons = bnx2_get_hw_tx_cons(bp);
2345 sw_cons = bp->tx_cons;
2347 while (sw_cons != hw_cons) {
2348 struct sw_bd *tx_buf;
2349 struct sk_buff *skb;
2352 sw_ring_cons = TX_RING_IDX(sw_cons);
2354 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2357 /* partial BD completions possible with TSO packets */
2358 if (skb_is_gso(skb)) {
2359 u16 last_idx, last_ring_idx;
2361 last_idx = sw_cons +
2362 skb_shinfo(skb)->nr_frags + 1;
2363 last_ring_idx = sw_ring_cons +
2364 skb_shinfo(skb)->nr_frags + 1;
2365 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2368 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2373 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2374 skb_headlen(skb), PCI_DMA_TODEVICE);
2377 last = skb_shinfo(skb)->nr_frags;
2379 for (i = 0; i < last; i++) {
2380 sw_cons = NEXT_TX_BD(sw_cons);
2382 pci_unmap_page(bp->pdev,
2384 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2386 skb_shinfo(skb)->frags[i].size,
2390 sw_cons = NEXT_TX_BD(sw_cons);
2392 tx_free_bd += last + 1;
2396 hw_cons = bnx2_get_hw_tx_cons(bp);
2399 bp->hw_tx_cons = hw_cons;
2400 bp->tx_cons = sw_cons;
2401 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2402 * before checking for netif_queue_stopped(). Without the
2403 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
2408 if (unlikely(netif_queue_stopped(bp->dev)) &&
2409 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2410 netif_tx_lock(bp->dev);
2411 if ((netif_queue_stopped(bp->dev)) &&
2412 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2413 netif_wake_queue(bp->dev);
2414 netif_tx_unlock(bp->dev);
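		/* The stopped test is repeated under netif_tx_lock because
		 * bnx2_start_xmit() can stop the queue concurrently; without
		 * the locked re-check, a wake-up could race with a fresh stop
		 * and be lost.
		 */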
2419 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
2421 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2422 struct rx_bd *cons_bd, *prod_bd;
2425 u16 hw_prod = bp->rx_pg_prod, prod;
2426 u16 cons = bp->rx_pg_cons;
2428 for (i = 0; i < count; i++) {
2429 prod = RX_PG_RING_IDX(hw_prod);
2431 prod_rx_pg = &bp->rx_pg_ring[prod];
2432 cons_rx_pg = &bp->rx_pg_ring[cons];
2433 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2434 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2436 if (i == 0 && skb) {
2438 struct skb_shared_info *shinfo;
2440 shinfo = skb_shinfo(skb);
2442 page = shinfo->frags[shinfo->nr_frags].page;
2443 shinfo->frags[shinfo->nr_frags].page = NULL;
2444 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2445 PCI_DMA_FROMDEVICE);
2446 cons_rx_pg->page = page;
2447 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2451 prod_rx_pg->page = cons_rx_pg->page;
2452 cons_rx_pg->page = NULL;
2453 pci_unmap_addr_set(prod_rx_pg, mapping,
2454 pci_unmap_addr(cons_rx_pg, mapping));
2456 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2457 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2460 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2461 hw_prod = NEXT_RX_BD(hw_prod);
2463 bp->rx_pg_prod = hw_prod;
2464 bp->rx_pg_cons = cons;
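/* Hand a received buffer straight back to the hardware by moving its DMA
 * mapping from the consumer slot to the producer slot; no reallocation is
 * needed.
 */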
2468 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2471 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2472 struct rx_bd *cons_bd, *prod_bd;
2474 cons_rx_buf = &bp->rx_buf_ring[cons];
2475 prod_rx_buf = &bp->rx_buf_ring[prod];
2477 pci_dma_sync_single_for_device(bp->pdev,
2478 pci_unmap_addr(cons_rx_buf, mapping),
2479 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2481 bp->rx_prod_bseq += bp->rx_buf_use_size;
2483 prod_rx_buf->skb = skb;
2488 pci_unmap_addr_set(prod_rx_buf, mapping,
2489 pci_unmap_addr(cons_rx_buf, mapping));
2491 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2492 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2493 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2494 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
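/* Turn a completed rx buffer into a full skb.  ring_idx packs both ring
 * indices:
 *
 *	cons = (u16) (ring_idx >> 16);
 *	prod = (u16) (ring_idx & 0xffff);
 *
 * For split (jumbo) packets only the header stays in the linear buffer;
 * the payload is attached as page fragments, and the trailing 4-byte
 * frame CRC is included when sizing them.
 */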
2498 bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
2499 unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
2502 u16 prod = ring_idx & 0xffff;
2504 err = bnx2_alloc_rx_skb(bp, prod);
2505 if (unlikely(err)) {
2506 bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
2508 unsigned int raw_len = len + 4;
2509 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2511 bnx2_reuse_rx_skb_pages(bp, NULL, pages);
2516 skb_reserve(skb, bp->rx_offset);
2517 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2518 PCI_DMA_FROMDEVICE);
2524 unsigned int i, frag_len, frag_size, pages;
2525 struct sw_pg *rx_pg;
2526 u16 pg_cons = bp->rx_pg_cons;
2527 u16 pg_prod = bp->rx_pg_prod;
2529 frag_size = len + 4 - hdr_len;
2530 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2531 skb_put(skb, hdr_len);
2533 for (i = 0; i < pages; i++) {
2534 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2535 if (unlikely(frag_len <= 4)) {
2536 unsigned int tail = 4 - frag_len;
2538 bp->rx_pg_cons = pg_cons;
2539 bp->rx_pg_prod = pg_prod;
2540 bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
2546 &skb_shinfo(skb)->frags[i - 1];
2548 skb->data_len -= tail;
2549 skb->truesize -= tail;
2553 rx_pg = &bp->rx_pg_ring[pg_cons];
2555 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2556 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2561 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2564 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2565 if (unlikely(err)) {
2566 bp->rx_pg_cons = pg_cons;
2567 bp->rx_pg_prod = pg_prod;
2568 bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
2572 frag_size -= frag_len;
2573 skb->data_len += frag_len;
2574 skb->truesize += frag_len;
2575 skb->len += frag_len;
2577 pg_prod = NEXT_RX_BD(pg_prod);
2578 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2580 bp->rx_pg_prod = pg_prod;
2581 bp->rx_pg_cons = pg_cons;
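/* As on the tx side, skip over the chain BD entry when the hardware rx
 * consumer index lands on it.
 */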
2587 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2589 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2591 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2597 bnx2_rx_int(struct bnx2 *bp, int budget)
2599 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2600 struct l2_fhdr *rx_hdr;
2601 int rx_pkt = 0, pg_ring_used = 0;
2603 hw_cons = bnx2_get_hw_rx_cons(bp);
2604 sw_cons = bp->rx_cons;
2605 sw_prod = bp->rx_prod;
2607 /* Memory barrier necessary as speculative reads of the rx
2608 * buffer can be ahead of the index in the status block. */
2611 while (sw_cons != hw_cons) {
2612 unsigned int len, hdr_len;
2614 struct sw_bd *rx_buf;
2615 struct sk_buff *skb;
2616 dma_addr_t dma_addr;
2618 sw_ring_cons = RX_RING_IDX(sw_cons);
2619 sw_ring_prod = RX_RING_IDX(sw_prod);
2621 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2626 dma_addr = pci_unmap_addr(rx_buf, mapping);
2628 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2629 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2631 rx_hdr = (struct l2_fhdr *) skb->data;
2632 len = rx_hdr->l2_fhdr_pkt_len;
2634 if ((status = rx_hdr->l2_fhdr_status) &
2635 (L2_FHDR_ERRORS_BAD_CRC |
2636 L2_FHDR_ERRORS_PHY_DECODE |
2637 L2_FHDR_ERRORS_ALIGNMENT |
2638 L2_FHDR_ERRORS_TOO_SHORT |
2639 L2_FHDR_ERRORS_GIANT_FRAME)) {
2641 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
2645 if (status & L2_FHDR_STATUS_SPLIT) {
2646 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2648 } else if (len > bp->rx_jumbo_thresh) {
2649 hdr_len = bp->rx_jumbo_thresh;
2655 if (len <= bp->rx_copy_thresh) {
2656 struct sk_buff *new_skb;
2658 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2659 if (new_skb == NULL) {
2660 bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
2666 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2667 new_skb->data, len + 2);
2668 skb_reserve(new_skb, 2);
2669 skb_put(new_skb, len);
2671 bnx2_reuse_rx_skb(bp, skb,
2672 sw_ring_cons, sw_ring_prod);
2675 } else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
2676 (sw_ring_cons << 16) | sw_ring_prod)))
2679 skb->protocol = eth_type_trans(skb, bp->dev);
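/* Oversized frames are only acceptable when VLAN tagged; 0x8100 is the
 * 802.1Q ethertype, and the tag accounts for the extra four bytes.
 */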
2681 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2682 (ntohs(skb->protocol) != 0x8100)) {
2689 skb->ip_summed = CHECKSUM_NONE;
2691 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2692 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2694 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2695 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2696 skb->ip_summed = CHECKSUM_UNNECESSARY;
2700 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2701 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2702 rx_hdr->l2_fhdr_vlan_tag);
2706 netif_receive_skb(skb);
2708 bp->dev->last_rx = jiffies;
2712 sw_cons = NEXT_RX_BD(sw_cons);
2713 sw_prod = NEXT_RX_BD(sw_prod);
2715 if (rx_pkt == budget)
2718 /* Refresh hw_cons to see if there is new work */
2719 if (sw_cons == hw_cons) {
2720 hw_cons = bnx2_get_hw_rx_cons(bp);
2724 bp->rx_cons = sw_cons;
2725 bp->rx_prod = sw_prod;
2728 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2731 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2733 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2741 /* MSI ISR - The only difference between this and the INTx ISR
2742 * is that the MSI interrupt is always serviced. */
2745 bnx2_msi(int irq, void *dev_instance)
2747 struct net_device *dev = dev_instance;
2748 struct bnx2 *bp = netdev_priv(dev);
2750 prefetch(bp->status_blk);
2751 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2752 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2753 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2755 /* Return here if interrupt is disabled. */
2756 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2759 netif_rx_schedule(dev, &bp->napi);
2765 bnx2_msi_1shot(int irq, void *dev_instance)
2767 struct net_device *dev = dev_instance;
2768 struct bnx2 *bp = netdev_priv(dev);
2770 prefetch(bp->status_blk);
2772 /* Return here if interrupt is disabled. */
2773 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2776 netif_rx_schedule(dev, &bp->napi);
2782 bnx2_interrupt(int irq, void *dev_instance)
2784 struct net_device *dev = dev_instance;
2785 struct bnx2 *bp = netdev_priv(dev);
2786 struct status_block *sblk = bp->status_blk;
2788 /* When using INTx, it is possible for the interrupt to arrive
2789 * at the CPU before the status block posted prior to the
2790 * interrupt. Reading a register will flush the status block.
2791 * When using MSI, the MSI message will always complete after
2792 * the status block write. */
2794 if ((sblk->status_idx == bp->last_status_idx) &&
2795 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2796 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2799 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2800 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2801 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2803 /* Read back to deassert IRQ immediately to avoid too many
2804 * spurious interrupts. */
2806 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2808 /* Return here if interrupt is shared and is disabled. */
2809 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2812 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2813 bp->last_status_idx = sblk->status_idx;
2814 __netif_rx_schedule(dev, &bp->napi);
2820 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2821 STATUS_ATTN_BITS_TIMER_ABORT)
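/* Work is pending when either hardware ring index has moved past the
 * corresponding software index, or when any attention event bit disagrees
 * with its acknowledged copy.
 */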
2824 bnx2_has_work(struct bnx2 *bp)
2826 struct status_block *sblk = bp->status_blk;
2828 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2829 (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons))
2832 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2833 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2839 static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
2841 struct status_block *sblk = bp->status_blk;
2842 u32 status_attn_bits = sblk->status_attn_bits;
2843 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2845 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2846 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2850 /* This is needed to take care of transient status
2851 * during link changes. */
2853 REG_WR(bp, BNX2_HC_COMMAND,
2854 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2855 REG_RD(bp, BNX2_HC_COMMAND);
2858 if (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons)
2861 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
2862 work_done += bnx2_rx_int(bp, budget - work_done);
2867 static int bnx2_poll(struct napi_struct *napi, int budget)
2869 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2871 struct status_block *sblk = bp->status_blk;
2874 work_done = bnx2_poll_work(bp, work_done, budget);
2876 if (unlikely(work_done >= budget))
2879 /* bp->last_status_idx is used below to tell the hw how
2880 * much work has been processed, so we must read it before
2881 * checking for more work. */
2883 bp->last_status_idx = sblk->status_idx;
2885 if (likely(!bnx2_has_work(bp))) {
2886 netif_rx_complete(bp->dev, napi);
2887 if (likely(bp->flags & USING_MSI_FLAG)) {
2888 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2889 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2890 bp->last_status_idx);
2893 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2894 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2895 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2896 bp->last_status_idx);
2898 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2899 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2900 bp->last_status_idx);
2908 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2909 * from set_multicast. */
2912 bnx2_set_rx_mode(struct net_device *dev)
2914 struct bnx2 *bp = netdev_priv(dev);
2915 u32 rx_mode, sort_mode;
2918 spin_lock_bh(&bp->phy_lock);
2920 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2921 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2922 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2924 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2925 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2927 if (!(bp->flags & ASF_ENABLE_FLAG))
2928 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2930 if (dev->flags & IFF_PROMISC) {
2931 /* Promiscuous mode. */
2932 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2933 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2934 BNX2_RPM_SORT_USER0_PROM_VLAN;
2936 else if (dev->flags & IFF_ALLMULTI) {
2937 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2938 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2941 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2944 /* Accept one or more multicast addresses. */
2945 struct dev_mc_list *mclist;
2946 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2951 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2953 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2954 i++, mclist = mclist->next) {
2956 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2958 regidx = (bit & 0xe0) >> 5;
2960 mc_filter[regidx] |= (1 << bit);
2963 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2964 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2968 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2971 if (rx_mode != bp->rx_mode) {
2972 bp->rx_mode = rx_mode;
2973 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2976 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2977 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2978 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2980 spin_unlock_bh(&bp->phy_lock);
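/* Load an RV2P processor image.  Each 64-bit instruction is written as a
 * high/low register pair and then committed to the selected processor's
 * instruction memory at index i / 8 through the ADDR_CMD register.
 */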
2984 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2991 for (i = 0; i < rv2p_code_len; i += 8) {
2992 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2994 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2997 if (rv2p_proc == RV2P_PROC1) {
2998 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2999 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3002 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3003 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3007 /* Reset the processor; the un-stall is done later. */
3008 if (rv2p_proc == RV2P_PROC1) {
3009 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3012 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3017 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3024 val = REG_RD_IND(bp, cpu_reg->mode);
3025 val |= cpu_reg->mode_value_halt;
3026 REG_WR_IND(bp, cpu_reg->mode, val);
3027 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3029 /* Load the Text area. */
3030 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3034 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3039 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3040 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
3044 /* Load the Data area. */
3045 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3049 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3050 REG_WR_IND(bp, offset, fw->data[j]);
3054 /* Load the SBSS area. */
3055 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3059 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3060 REG_WR_IND(bp, offset, 0);
3064 /* Load the BSS area. */
3065 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3069 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3070 REG_WR_IND(bp, offset, 0);
3074 /* Load the Read-Only area. */
3075 offset = cpu_reg->spad_base +
3076 (fw->rodata_addr - cpu_reg->mips_view_base);
3080 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3081 REG_WR_IND(bp, offset, fw->rodata[j]);
3085 /* Clear the pre-fetch instruction. */
3086 REG_WR_IND(bp, cpu_reg->inst, 0);
3087 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
3089 /* Start the CPU. */
3090 val = REG_RD_IND(bp, cpu_reg->mode);
3091 val &= ~cpu_reg->mode_value_halt;
3092 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
3093 REG_WR_IND(bp, cpu_reg->mode, val);
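/* Load firmware into every on-chip processor: the two RV2P engines first,
 * then the RX, TX, TX patch-up, completion and command processors.  The
 * images are stored compressed and inflated into a scratch buffer before
 * being written out.
 */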
3099 bnx2_init_cpus(struct bnx2 *bp)
3101 struct cpu_reg cpu_reg;
3106 /* Initialize the RV2P processor. */
3107 text = vmalloc(FW_BUF_SIZE);
3110 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3111 rv2p = bnx2_xi_rv2p_proc1;
3112 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3114 rv2p = bnx2_rv2p_proc1;
3115 rv2p_len = sizeof(bnx2_rv2p_proc1);
3117 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3121 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3123 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3124 rv2p = bnx2_xi_rv2p_proc2;
3125 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3127 rv2p = bnx2_rv2p_proc2;
3128 rv2p_len = sizeof(bnx2_rv2p_proc2);
3130 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3134 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3136 /* Initialize the RX Processor. */
3137 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3138 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3139 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3140 cpu_reg.state = BNX2_RXP_CPU_STATE;
3141 cpu_reg.state_value_clear = 0xffffff;
3142 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3143 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3144 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3145 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3146 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3147 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3148 cpu_reg.mips_view_base = 0x8000000;
3150 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3151 fw = &bnx2_rxp_fw_09;
3153 fw = &bnx2_rxp_fw_06;
3156 rc = load_cpu_fw(bp, &cpu_reg, fw);
3160 /* Initialize the TX Processor. */
3161 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3162 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3163 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3164 cpu_reg.state = BNX2_TXP_CPU_STATE;
3165 cpu_reg.state_value_clear = 0xffffff;
3166 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3167 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3168 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3169 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3170 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3171 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3172 cpu_reg.mips_view_base = 0x8000000;
3174 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3175 fw = &bnx2_txp_fw_09;
3177 fw = &bnx2_txp_fw_06;
3180 rc = load_cpu_fw(bp, &cpu_reg, fw);
3184 /* Initialize the TX Patch-up Processor. */
3185 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3186 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3187 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3188 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3189 cpu_reg.state_value_clear = 0xffffff;
3190 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3191 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3192 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3193 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3194 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3195 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3196 cpu_reg.mips_view_base = 0x8000000;
3198 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3199 fw = &bnx2_tpat_fw_09;
3201 fw = &bnx2_tpat_fw_06;
3204 rc = load_cpu_fw(bp, &cpu_reg, fw);
3208 /* Initialize the Completion Processor. */
3209 cpu_reg.mode = BNX2_COM_CPU_MODE;
3210 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3211 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3212 cpu_reg.state = BNX2_COM_CPU_STATE;
3213 cpu_reg.state_value_clear = 0xffffff;
3214 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3215 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3216 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3217 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3218 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3219 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3220 cpu_reg.mips_view_base = 0x8000000;
3222 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3223 fw = &bnx2_com_fw_09;
3225 fw = &bnx2_com_fw_06;
3228 rc = load_cpu_fw(bp, &cpu_reg, fw);
3232 /* Initialize the Command Processor. */
3233 cpu_reg.mode = BNX2_CP_CPU_MODE;
3234 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3235 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3236 cpu_reg.state = BNX2_CP_CPU_STATE;
3237 cpu_reg.state_value_clear = 0xffffff;
3238 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3239 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3240 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3241 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3242 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3243 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3244 cpu_reg.mips_view_base = 0x8000000;
3246 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3247 fw = &bnx2_cp_fw_09;
3249 fw = &bnx2_cp_fw_06;
3252 rc = load_cpu_fw(bp, &cpu_reg, fw);
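/* Move the chip between PCI power states.  On suspend with WOL enabled,
 * a copper PHY is restricted to the lower speeds, the MAC is set up to
 * recognize magic and ACPI packets, and the firmware is told which
 * suspend mode is in effect before the power state is written.
 */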
3260 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3264 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3270 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3271 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3272 PCI_PM_CTRL_PME_STATUS);
3274 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3275 /* delay required during transition out of D3hot */
3278 val = REG_RD(bp, BNX2_EMAC_MODE);
3279 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3280 val &= ~BNX2_EMAC_MODE_MPKT;
3281 REG_WR(bp, BNX2_EMAC_MODE, val);
3283 val = REG_RD(bp, BNX2_RPM_CONFIG);
3284 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3285 REG_WR(bp, BNX2_RPM_CONFIG, val);
3296 autoneg = bp->autoneg;
3297 advertising = bp->advertising;
3299 if (bp->phy_port == PORT_TP) {
3300 bp->autoneg = AUTONEG_SPEED;
3301 bp->advertising = ADVERTISED_10baseT_Half |
3302 ADVERTISED_10baseT_Full |
3303 ADVERTISED_100baseT_Half |
3304 ADVERTISED_100baseT_Full |
3308 spin_lock_bh(&bp->phy_lock);
3309 bnx2_setup_phy(bp, bp->phy_port);
3310 spin_unlock_bh(&bp->phy_lock);
3312 bp->autoneg = autoneg;
3313 bp->advertising = advertising;
3315 bnx2_set_mac_addr(bp);
3317 val = REG_RD(bp, BNX2_EMAC_MODE);
3319 /* Enable port mode. */
3320 val &= ~BNX2_EMAC_MODE_PORT;
3321 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3322 BNX2_EMAC_MODE_ACPI_RCVD |
3323 BNX2_EMAC_MODE_MPKT;
3324 if (bp->phy_port == PORT_TP)
3325 val |= BNX2_EMAC_MODE_PORT_MII;
3327 val |= BNX2_EMAC_MODE_PORT_GMII;
3328 if (bp->line_speed == SPEED_2500)
3329 val |= BNX2_EMAC_MODE_25G_MODE;
3332 REG_WR(bp, BNX2_EMAC_MODE, val);
3334 /* receive all multicast */
3335 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3336 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3339 REG_WR(bp, BNX2_EMAC_RX_MODE,
3340 BNX2_EMAC_RX_MODE_SORT_MODE);
3342 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3343 BNX2_RPM_SORT_USER0_MC_EN;
3344 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3345 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3346 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3347 BNX2_RPM_SORT_USER0_ENA);
3349 /* Need to enable EMAC and RPM for WOL. */
3350 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3351 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3352 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3353 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3355 val = REG_RD(bp, BNX2_RPM_CONFIG);
3356 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3357 REG_WR(bp, BNX2_RPM_CONFIG, val);
3359 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3362 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3365 if (!(bp->flags & NO_WOL_FLAG))
3366 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3368 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3369 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3370 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3379 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3381 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3384 /* No more memory access after this point until
3385 * device is brought back to D0. */
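/* The NVRAM interface is shared with the firmware.  Arbitration request 2
 * belongs to the driver; both acquire and release poll the arbiter up to
 * NVRAM_TIMEOUT_COUNT times for the grant to appear or clear.
 */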
3397 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3402 /* Request access to the flash interface. */
3403 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3404 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3405 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3406 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3412 if (j >= NVRAM_TIMEOUT_COUNT)
3419 bnx2_release_nvram_lock(struct bnx2 *bp)
3424 /* Relinquish nvram interface. */
3425 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3427 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3428 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3429 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3435 if (j >= NVRAM_TIMEOUT_COUNT)
3443 bnx2_enable_nvram_write(struct bnx2 *bp)
3447 val = REG_RD(bp, BNX2_MISC_CFG);
3448 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3450 if (bp->flash_info->flags & BNX2_NV_WREN) {
3453 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3454 REG_WR(bp, BNX2_NVM_COMMAND,
3455 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3457 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3460 val = REG_RD(bp, BNX2_NVM_COMMAND);
3461 if (val & BNX2_NVM_COMMAND_DONE)
3465 if (j >= NVRAM_TIMEOUT_COUNT)
3472 bnx2_disable_nvram_write(struct bnx2 *bp)
3476 val = REG_RD(bp, BNX2_MISC_CFG);
3477 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3482 bnx2_enable_nvram_access(struct bnx2 *bp)
3486 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3487 /* Enable both bits, even on read. */
3488 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3489 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3493 bnx2_disable_nvram_access(struct bnx2 *bp)
3497 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3498 /* Disable both bits, even after read. */
3499 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3500 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3501 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3505 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3510 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3511 /* Buffered flash, no erase needed */
3514 /* Build an erase command */
3515 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3516 BNX2_NVM_COMMAND_DOIT;
3518 /* Need to clear DONE bit separately. */
3519 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3521 /* Address of the NVRAM page to erase. */
3522 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3524 /* Issue an erase command. */
3525 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3527 /* Wait for completion. */
3528 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3533 val = REG_RD(bp, BNX2_NVM_COMMAND);
3534 if (val & BNX2_NVM_COMMAND_DONE)
3538 if (j >= NVRAM_TIMEOUT_COUNT)
3545 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3550 /* Build the command word. */
3551 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3553 /* Translate the offset into the buffered flash's page addressing; not needed for the 5709. */
3554 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3555 offset = ((offset / bp->flash_info->page_size) <<
3556 bp->flash_info->page_bits) +
3557 (offset % bp->flash_info->page_size);
3560 /* Need to clear DONE bit separately. */
3561 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3563 /* Address of the NVRAM to read from. */
3564 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3566 /* Issue a read command. */
3567 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3569 /* Wait for completion. */
3570 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3575 val = REG_RD(bp, BNX2_NVM_COMMAND);
3576 if (val & BNX2_NVM_COMMAND_DONE) {
3577 val = REG_RD(bp, BNX2_NVM_READ);
3579 val = be32_to_cpu(val);
3580 memcpy(ret_val, &val, 4);
3584 if (j >= NVRAM_TIMEOUT_COUNT)
3592 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3597 /* Build the command word. */
3598 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3600 /* Translate the offset into the buffered flash's page addressing; not needed for the 5709. */
3601 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3602 offset = ((offset / bp->flash_info->page_size) <<
3603 bp->flash_info->page_bits) +
3604 (offset % bp->flash_info->page_size);
3607 /* Need to clear DONE bit separately. */
3608 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3610 memcpy(&val32, val, 4);
3611 val32 = cpu_to_be32(val32);
3613 /* Write the data. */
3614 REG_WR(bp, BNX2_NVM_WRITE, val32);
3616 /* Address of the NVRAM to write to. */
3617 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3619 /* Issue the write command. */
3620 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3622 /* Wait for completion. */
3623 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3626 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3629 if (j >= NVRAM_TIMEOUT_COUNT)
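/* Identify the attached flash part.  The 5709 always uses the fixed
 * flash_5709 spec; other chips match the strapping (or the reconfigured
 * NVM_CFG1 value) against flash_table and, on first use, program the
 * interface with the matching configuration words.
 */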
3636 bnx2_init_nvram(struct bnx2 *bp)
3639 int j, entry_count, rc = 0;
3640 struct flash_spec *flash;
3642 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3643 bp->flash_info = &flash_5709;
3644 goto get_flash_size;
3647 /* Determine the selected interface. */
3648 val = REG_RD(bp, BNX2_NVM_CFG1);
3650 entry_count = ARRAY_SIZE(flash_table);
3652 if (val & 0x40000000) {
3654 /* Flash interface has been reconfigured */
3655 for (j = 0, flash = &flash_table[0]; j < entry_count;
3657 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3658 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3659 bp->flash_info = flash;
3666 /* Not yet reconfigured */
3668 if (val & (1 << 23))
3669 mask = FLASH_BACKUP_STRAP_MASK;
3671 mask = FLASH_STRAP_MASK;
3673 for (j = 0, flash = &flash_table[0]; j < entry_count;
3676 if ((val & mask) == (flash->strapping & mask)) {
3677 bp->flash_info = flash;
3679 /* Request access to the flash interface. */
3680 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3683 /* Enable access to flash interface */
3684 bnx2_enable_nvram_access(bp);
3686 /* Reconfigure the flash interface */
3687 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3688 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3689 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3690 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3692 /* Disable access to flash interface */
3693 bnx2_disable_nvram_access(bp);
3694 bnx2_release_nvram_lock(bp);
3699 } /* if (val & 0x40000000) */
3701 if (j == entry_count) {
3702 bp->flash_info = NULL;
3703 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3708 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3709 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3711 bp->flash_size = val;
3713 bp->flash_size = bp->flash_info->total_size;
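/* NVRAM reads operate a dword at a time.  Misaligned leading and trailing
 * bytes are handled by reading the containing dword into a small bounce
 * buffer and copying out only the bytes that were requested.
 */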
3719 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3723 u32 cmd_flags, offset32, len32, extra;
3728 /* Request access to the flash interface. */
3729 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3732 /* Enable access to flash interface */
3733 bnx2_enable_nvram_access(bp);
3746 pre_len = 4 - (offset & 3);
3748 if (pre_len >= len32) {
3750 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3751 BNX2_NVM_COMMAND_LAST;
3754 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3757 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3762 memcpy(ret_buf, buf + (offset & 3), pre_len);
3769 extra = 4 - (len32 & 3);
3770 len32 = (len32 + 4) & ~3;
3777 cmd_flags = BNX2_NVM_COMMAND_LAST;
3779 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3780 BNX2_NVM_COMMAND_LAST;
3782 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3784 memcpy(ret_buf, buf, 4 - extra);
3786 else if (len32 > 0) {
3789 /* Read the first word. */
3793 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3795 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3797 /* Advance to the next dword. */
3802 while (len32 > 4 && rc == 0) {
3803 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3805 /* Advance to the next dword. */
3814 cmd_flags = BNX2_NVM_COMMAND_LAST;
3815 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3817 memcpy(ret_buf, buf, 4 - extra);
3820 /* Disable access to flash interface */
3821 bnx2_disable_nvram_access(bp);
3823 bnx2_release_nvram_lock(bp);
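/* NVRAM writes are read-modify-write at two levels: misaligned ends of
 * the request are first merged with existing data, and on non-buffered
 * flash each affected page is read out in full, erased, and rewritten
 * with the new bytes spliced in.
 */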
3829 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3832 u32 written, offset32, len32;
3833 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3835 int align_start, align_end;
3840 align_start = align_end = 0;
3842 if ((align_start = (offset32 & 3))) {
3844 len32 += align_start;
3847 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3852 align_end = 4 - (len32 & 3);
3854 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3858 if (align_start || align_end) {
3859 align_buf = kmalloc(len32, GFP_KERNEL);
3860 if (align_buf == NULL)
3863 memcpy(align_buf, start, 4);
3866 memcpy(align_buf + len32 - 4, end, 4);
3868 memcpy(align_buf + align_start, data_buf, buf_size);
3872 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3873 flash_buffer = kmalloc(264, GFP_KERNEL);
3874 if (flash_buffer == NULL) {
3876 goto nvram_write_end;
3881 while ((written < len32) && (rc == 0)) {
3882 u32 page_start, page_end, data_start, data_end;
3883 u32 addr, cmd_flags;
3886 /* Find the page_start addr */
3887 page_start = offset32 + written;
3888 page_start -= (page_start % bp->flash_info->page_size);
3889 /* Find the page_end addr */
3890 page_end = page_start + bp->flash_info->page_size;
3891 /* Find the data_start addr */
3892 data_start = (written == 0) ? offset32 : page_start;
3893 /* Find the data_end addr */
3894 data_end = (page_end > offset32 + len32) ?
3895 (offset32 + len32) : page_end;
3897 /* Request access to the flash interface. */
3898 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3899 goto nvram_write_end;
3901 /* Enable access to flash interface */
3902 bnx2_enable_nvram_access(bp);
3904 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3905 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3908 /* Read the whole page into the buffer
3909 * (non-buffered flash only) */
3910 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3911 if (j == (bp->flash_info->page_size - 4)) {
3912 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3914 rc = bnx2_nvram_read_dword(bp,
3920 goto nvram_write_end;
3926 /* Enable writes to flash interface (unlock write-protect) */
3927 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3928 goto nvram_write_end;
3930 /* Loop to write back the buffer data from page_start to data_start. */
3933 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3934 /* Erase the page */
3935 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3936 goto nvram_write_end;
3938 /* Re-enable the write again for the actual write */
3939 bnx2_enable_nvram_write(bp);
3941 for (addr = page_start; addr < data_start;
3942 addr += 4, i += 4) {
3944 rc = bnx2_nvram_write_dword(bp, addr,
3945 &flash_buffer[i], cmd_flags);
3948 goto nvram_write_end;
3954 /* Loop to write the new data from data_start to data_end */
3955 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3956 if ((addr == page_end - 4) ||
3957 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3958 (addr == data_end - 4))) {
3960 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3962 rc = bnx2_nvram_write_dword(bp, addr, buf,
3966 goto nvram_write_end;
3972 /* Loop to write back the buffer data from data_end to page_end. */
3974 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3975 for (addr = data_end; addr < page_end;
3976 addr += 4, i += 4) {
3978 if (addr == page_end-4) {
3979 cmd_flags = BNX2_NVM_COMMAND_LAST;
3981 rc = bnx2_nvram_write_dword(bp, addr,
3982 &flash_buffer[i], cmd_flags);
3985 goto nvram_write_end;
3991 /* Disable writes to flash interface (lock write-protect) */
3992 bnx2_disable_nvram_write(bp);
3994 /* Disable access to flash interface */
3995 bnx2_disable_nvram_access(bp);
3996 bnx2_release_nvram_lock(bp);
3998 /* Increment written */
3999 written += data_end - data_start;
4003 kfree(flash_buffer);
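/* Probe for firmware-managed remote PHY support (SerDes only).  When the
 * firmware advertises the capability, link management is delegated to it
 * and the driver acknowledges through the shared-memory mailbox.
 */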
4009 bnx2_init_remote_phy(struct bnx2 *bp)
4013 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4014 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4017 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4018 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4021 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4022 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4024 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4025 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4026 bp->phy_port = PORT_FIBRE;
4028 bp->phy_port = PORT_TP;
4030 if (netif_running(bp->dev)) {
4033 if (val & BNX2_LINK_STATUS_LINK_UP) {
4035 netif_carrier_on(bp->dev);
4038 netif_carrier_off(bp->dev);
4040 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4041 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4042 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4049 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4055 /* Wait for the current PCI transaction to complete before
4056 * issuing a reset. */
4057 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4058 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4059 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4060 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4061 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4062 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4065 /* Wait for the firmware to tell us it is ok to issue a reset. */
4066 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4068 /* Deposit a driver reset signature so the firmware knows that
4069 * this is a soft reset. */
4070 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
4071 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4073 /* Do a dummy read to force the chip to complete all outstanding
4074 * transactions before we issue the reset. */
4075 val = REG_RD(bp, BNX2_MISC_ID);
4077 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4078 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4079 REG_RD(bp, BNX2_MISC_COMMAND);
4082 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4083 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4085 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4088 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4089 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4090 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4093 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4095 /* Reading back any register after chip reset will hang the
4096 * bus on 5706 A0 and A1. The msleep below provides plenty
4097 * of margin for write posting. */
4099 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4100 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4103 /* Reset takes approximately 30 usec */
4104 for (i = 0; i < 10; i++) {
4105 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4106 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4107 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4112 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4113 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4114 printk(KERN_ERR PFX "Chip reset did not complete\n");
4119 /* Make sure byte swapping is properly configured. */
4120 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4121 if (val != 0x01020304) {
4122 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4126 /* Wait for the firmware to finish its initialization. */
4127 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4131 spin_lock_bh(&bp->phy_lock);
4132 old_port = bp->phy_port;
4133 bnx2_init_remote_phy(bp);
4134 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
4135 bnx2_set_default_remote_link(bp);
4136 spin_unlock_bh(&bp->phy_lock);
4138 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4139 /* Adjust the voltage regulator two steps lower. The default
4140 * value of this register is 0x0000000e. */
4141 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4143 /* Remove bad rbuf memory from the free pool. */
4144 rc = bnx2_alloc_bad_rbuf(bp);
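/* Bring the freshly reset chip to an operational state: program the DMA
 * and byte-swap configuration, load the processor firmware, program the
 * MTU and host-coalescing parameters, point the chip at the status and
 * statistics blocks, and set the default rx filter.
 */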
4151 bnx2_init_chip(struct bnx2 *bp)
4156 /* Make sure the interrupt is not active. */
4157 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4159 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4160 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4162 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4164 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4165 DMA_READ_CHANS << 12 |
4166 DMA_WRITE_CHANS << 16;
4168 val |= (0x2 << 20) | (1 << 11);
4170 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4173 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4174 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4175 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4177 REG_WR(bp, BNX2_DMA_CONFIG, val);
4179 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4180 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4181 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4182 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4185 if (bp->flags & PCIX_FLAG) {
4188 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4190 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4191 val16 & ~PCI_X_CMD_ERO);
4194 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4195 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4196 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4197 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4199 /* Initialize context mapping and zero out the quick contexts. The
4200 * context block must have already been enabled. */
4201 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4202 rc = bnx2_init_5709_context(bp);
4206 bnx2_init_context(bp);
4208 if ((rc = bnx2_init_cpus(bp)) != 0)
4211 bnx2_init_nvram(bp);
4213 bnx2_set_mac_addr(bp);
4215 val = REG_RD(bp, BNX2_MQ_CONFIG);
4216 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4217 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4218 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4219 val |= BNX2_MQ_CONFIG_HALT_DIS;
4221 REG_WR(bp, BNX2_MQ_CONFIG, val);
4223 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4224 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4225 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4227 val = (BCM_PAGE_BITS - 8) << 24;
4228 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4230 /* Configure page size. */
4231 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4232 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4233 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4234 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4236 val = bp->mac_addr[0] +
4237 (bp->mac_addr[1] << 8) +
4238 (bp->mac_addr[2] << 16) +
4240 (bp->mac_addr[4] << 8) +
4241 (bp->mac_addr[5] << 16);
4242 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4244 /* Program the MTU. Also include 4 bytes for CRC32. */
4245 val = bp->dev->mtu + ETH_HLEN + 4;
4246 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4247 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4248 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4250 bp->last_status_idx = 0;
4251 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4253 /* Set up how to generate a link change interrupt. */
4254 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4256 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4257 (u64) bp->status_blk_mapping & 0xffffffff);
4258 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4260 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4261 (u64) bp->stats_blk_mapping & 0xffffffff);
4262 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4263 (u64) bp->stats_blk_mapping >> 32);
4265 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4266 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4268 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4269 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4271 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4272 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4274 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4276 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4278 REG_WR(bp, BNX2_HC_COM_TICKS,
4279 (bp->com_ticks_int << 16) | bp->com_ticks);
4281 REG_WR(bp, BNX2_HC_CMD_TICKS,
4282 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4284 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4285 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4287 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4288 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4290 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4291 val = BNX2_HC_CONFIG_COLLECT_STATS;
4293 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4294 BNX2_HC_CONFIG_COLLECT_STATS;
4297 if (bp->flags & ONE_SHOT_MSI_FLAG)
4298 val |= BNX2_HC_CONFIG_ONE_SHOT;
4300 REG_WR(bp, BNX2_HC_CONFIG, val);
4302 /* Clear internal stats counters. */
4303 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4305 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4307 /* Initialize the receive filter. */
4308 bnx2_set_rx_mode(bp->dev);
4310 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4311 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4312 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4313 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4315 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4318 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4319 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4323 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
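/* The 5709 uses a different L2 context layout (the _XI offsets), so the
 * per-chip offsets are selected before the ring type, command type and
 * ring base address are written into context memory.
 */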
4329 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4331 u32 val, offset0, offset1, offset2, offset3;
4333 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4334 offset0 = BNX2_L2CTX_TYPE_XI;
4335 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4336 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4337 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4339 offset0 = BNX2_L2CTX_TYPE;
4340 offset1 = BNX2_L2CTX_CMD_TYPE;
4341 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4342 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4344 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4345 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4347 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4348 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4350 val = (u64) bp->tx_desc_mapping >> 32;
4351 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4353 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4354 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
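/* Set up the tx ring.  The extra BD at the end of the page is a chain BD
 * pointing back to the start of the same ring, and the doorbell mailbox
 * addresses (bidx/bseq) are cached for the transmit fast path.
 */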
4358 bnx2_init_tx_ring(struct bnx2 *bp)
4363 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4365 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4367 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4368 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4373 bp->tx_prod_bseq = 0;
4376 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4377 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4379 bnx2_init_tx_context(bp, cid);
4383 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4389 for (i = 0; i < num_rings; i++) {
4392 rxbd = &rx_ring[i][0];
4393 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4394 rxbd->rx_bd_len = buf_size;
4395 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4397 if (i == (num_rings - 1))
4401 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4402 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4407 bnx2_init_rx_ring(struct bnx2 *bp)
4410 u16 prod, ring_prod;
4411 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4415 bp->rx_prod_bseq = 0;
4419 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4420 bp->rx_buf_use_size, bp->rx_max_ring);
4422 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4423 if (bp->rx_pg_ring_size) {
4424 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4425 bp->rx_pg_desc_mapping,
4426 PAGE_SIZE, bp->rx_max_pg_ring);
4427 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4428 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4429 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4430 BNX2_L2CTX_RBDC_JUMBO_KEY);
4432 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4433 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4435 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4436 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4438 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4439 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4442 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4443 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4445 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4447 val = (u64) bp->rx_desc_mapping[0] >> 32;
4448 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4450 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4451 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4453 ring_prod = prod = bp->rx_pg_prod;
4454 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4455 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4457 prod = NEXT_RX_BD(prod);
4458 ring_prod = RX_PG_RING_IDX(prod);
4460 bp->rx_pg_prod = prod;
4462 ring_prod = prod = bp->rx_prod;
4463 for (i = 0; i < bp->rx_ring_size; i++) {
4464 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4467 prod = NEXT_RX_BD(prod);
4468 ring_prod = RX_RING_IDX(prod);
4472 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
4473 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4475 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
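/* Convert a requested buffer count into a power-of-two number of ring
 * pages, each holding MAX_RX_DESC_CNT usable BDs.  For example, 600
 * buffers span three pages, which is rounded up to four rings (capped at
 * max_size).
 */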
4478 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4480 u32 max, num_rings = 1;
4482 while (ring_size > MAX_RX_DESC_CNT) {
4483 ring_size -= MAX_RX_DESC_CNT;
4486 /* round to next power of 2 */
4488 while ((max & num_rings) == 0)
4491 if (num_rings != max)
4498 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4500 u32 rx_size, rx_space, jumbo_size;
4502 /* 8 for CRC and VLAN */
4503 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4505 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4506 sizeof(struct skb_shared_info);
4508 bp->rx_copy_thresh = RX_COPY_THRESH;
4509 bp->rx_pg_ring_size = 0;
4510 bp->rx_max_pg_ring = 0;
4511 bp->rx_max_pg_ring_idx = 0;
4512 if (rx_space > PAGE_SIZE) {
4513 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4515 jumbo_size = size * pages;
4516 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4517 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4519 bp->rx_pg_ring_size = jumbo_size;
4520 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4522 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4523 rx_size = RX_COPY_THRESH + bp->rx_offset;
4524 bp->rx_copy_thresh = 0;
4527 bp->rx_buf_use_size = rx_size;
4529 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4530 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4531 bp->rx_ring_size = size;
4532 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4533 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4537 bnx2_free_tx_skbs(struct bnx2 *bp)
4541 if (bp->tx_buf_ring == NULL)
4544 for (i = 0; i < TX_DESC_CNT; ) {
4545 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4546 struct sk_buff *skb = tx_buf->skb;
4554 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4555 skb_headlen(skb), PCI_DMA_TODEVICE);
4559 last = skb_shinfo(skb)->nr_frags;
4560 for (j = 0; j < last; j++) {
4561 tx_buf = &bp->tx_buf_ring[i + j + 1];
4562 pci_unmap_page(bp->pdev,
4563 pci_unmap_addr(tx_buf, mapping),
4564 skb_shinfo(skb)->frags[j].size,
4574 bnx2_free_rx_skbs(struct bnx2 *bp)
4578 if (bp->rx_buf_ring == NULL)
4581 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4582 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4583 struct sk_buff *skb = rx_buf->skb;
4588 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4589 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4595 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4596 bnx2_free_rx_page(bp, i);
4600 bnx2_free_skbs(struct bnx2 *bp)
4602 bnx2_free_tx_skbs(bp);
4603 bnx2_free_rx_skbs(bp);
4607 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4611 rc = bnx2_reset_chip(bp, reset_code);
4616 if ((rc = bnx2_init_chip(bp)) != 0)
4619 bnx2_init_tx_ring(bp);
4620 bnx2_init_rx_ring(bp);
4625 bnx2_init_nic(struct bnx2 *bp)
4629 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4632 spin_lock_bh(&bp->phy_lock);
4635 spin_unlock_bh(&bp->phy_lock);
4640 bnx2_test_registers(struct bnx2 *bp)
4644 static const struct {
4647 #define BNX2_FL_NOT_5709 1
4651 { 0x006c, 0, 0x00000000, 0x0000003f },
4652 { 0x0090, 0, 0xffffffff, 0x00000000 },
4653 { 0x0094, 0, 0x00000000, 0x00000000 },
4655 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4656 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4657 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4658 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4659 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4660 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4661 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4662 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4663 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4665 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4666 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4667 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4668 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4669 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4670 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4672 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4673 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4674 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4676 { 0x1000, 0, 0x00000000, 0x00000001 },
4677 { 0x1004, 0, 0x00000000, 0x000f0001 },
4679 { 0x1408, 0, 0x01c00800, 0x00000000 },
4680 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4681 { 0x14a8, 0, 0x00000000, 0x000001ff },
4682 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4683 { 0x14b0, 0, 0x00000002, 0x00000001 },
4684 { 0x14b8, 0, 0x00000000, 0x00000000 },
4685 { 0x14c0, 0, 0x00000000, 0x00000009 },
4686 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4687 { 0x14cc, 0, 0x00000000, 0x00000001 },
4688 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4690 { 0x1800, 0, 0x00000000, 0x00000001 },
4691 { 0x1804, 0, 0x00000000, 0x00000003 },
4693 { 0x2800, 0, 0x00000000, 0x00000001 },
4694 { 0x2804, 0, 0x00000000, 0x00003f01 },
4695 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4696 { 0x2810, 0, 0xffff0000, 0x00000000 },
4697 { 0x2814, 0, 0xffff0000, 0x00000000 },
4698 { 0x2818, 0, 0xffff0000, 0x00000000 },
4699 { 0x281c, 0, 0xffff0000, 0x00000000 },
4700 { 0x2834, 0, 0xffffffff, 0x00000000 },
4701 { 0x2840, 0, 0x00000000, 0xffffffff },
4702 { 0x2844, 0, 0x00000000, 0xffffffff },
4703 { 0x2848, 0, 0xffffffff, 0x00000000 },
4704 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4706 { 0x2c00, 0, 0x00000000, 0x00000011 },
4707 { 0x2c04, 0, 0x00000000, 0x00030007 },
4709 { 0x3c00, 0, 0x00000000, 0x00000001 },
4710 { 0x3c04, 0, 0x00000000, 0x00070000 },
4711 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4712 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4713 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4714 { 0x3c14, 0, 0x00000000, 0xffffffff },
4715 { 0x3c18, 0, 0x00000000, 0xffffffff },
4716 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4717 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4719 { 0x5004, 0, 0x00000000, 0x0000007f },
4720 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4722 { 0x5c00, 0, 0x00000000, 0x00000001 },
4723 { 0x5c04, 0, 0x00000000, 0x0003000f },
4724 { 0x5c08, 0, 0x00000003, 0x00000000 },
4725 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4726 { 0x5c10, 0, 0x00000000, 0xffffffff },
4727 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4728 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4729 { 0x5c88, 0, 0x00000000, 0x00077373 },
4730 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4732 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4733 { 0x680c, 0, 0xffffffff, 0x00000000 },
4734 { 0x6810, 0, 0xffffffff, 0x00000000 },
4735 { 0x6814, 0, 0xffffffff, 0x00000000 },
4736 { 0x6818, 0, 0xffffffff, 0x00000000 },
4737 { 0x681c, 0, 0xffffffff, 0x00000000 },
4738 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4739 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4740 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4741 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4742 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4743 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4744 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4745 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4746 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4747 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4748 { 0x684c, 0, 0xffffffff, 0x00000000 },
4749 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4750 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4751 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4752 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4753 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4754 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4756 { 0xffff, 0, 0x00000000, 0x00000000 },
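/* Walk the table: write 0 and then 0xffffffff to each offset, checking
 * that writable bits respond, that read-only bits keep their saved value,
 * and that the original contents are restored afterward.
 */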
4761 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4764 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4765 u32 offset, rw_mask, ro_mask, save_val, val;
4766 u16 flags = reg_tbl[i].flags;
4768 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4771 offset = (u32) reg_tbl[i].offset;
4772 rw_mask = reg_tbl[i].rw_mask;
4773 ro_mask = reg_tbl[i].ro_mask;
4775 save_val = readl(bp->regview + offset);
4777 writel(0, bp->regview + offset);
4779 val = readl(bp->regview + offset);
4780 if ((val & rw_mask) != 0) {
4784 if ((val & ro_mask) != (save_val & ro_mask)) {
4788 writel(0xffffffff, bp->regview + offset);
4790 val = readl(bp->regview + offset);
4791 if ((val & rw_mask) != rw_mask) {
4795 if ((val & ro_mask) != (save_val & ro_mask)) {
4799 writel(save_val, bp->regview + offset);
4803 writel(save_val, bp->regview + offset);
4811 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4813 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4814 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4817 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4820 for (offset = 0; offset < size; offset += 4) {
4822 REG_WR_IND(bp, start + offset, test_pattern[i]);
4824 if (REG_RD_IND(bp, start + offset) !=
4834 bnx2_test_memory(struct bnx2 *bp)
4838 static struct mem_entry {
4841 } mem_tbl_5706[] = {
4842 { 0x60000, 0x4000 },
4843 { 0xa0000, 0x3000 },
4844 { 0xe0000, 0x4000 },
4845 { 0x120000, 0x4000 },
4846 { 0x1a0000, 0x4000 },
4847 { 0x160000, 0x4000 },
4851 { 0x60000, 0x4000 },
4852 { 0xa0000, 0x3000 },
4853 { 0xe0000, 0x4000 },
4854 { 0x120000, 0x4000 },
4855 { 0x1a0000, 0x4000 },
4858 struct mem_entry *mem_tbl;
4860 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4861 mem_tbl = mem_tbl_5709;
4863 mem_tbl = mem_tbl_5706;
4865 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4866 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4867 mem_tbl[i].len)) != 0) {
4875 #define BNX2_MAC_LOOPBACK 0
4876 #define BNX2_PHY_LOOPBACK 1
4879 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4881 unsigned int pkt_size, num_pkts, i;
4882 struct sk_buff *skb, *rx_skb;
4883 unsigned char *packet;
4884 u16 rx_start_idx, rx_idx;
4887 struct sw_bd *rx_buf;
4888 struct l2_fhdr *rx_hdr;
4891 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4892 bp->loopback = MAC_LOOPBACK;
4893 bnx2_set_mac_loopback(bp);
4895 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4896 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4899 bp->loopback = PHY_LOOPBACK;
4900 bnx2_set_phy_loopback(bp);
4905 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
4906 skb = netdev_alloc_skb(bp->dev, pkt_size);
4909 packet = skb_put(skb, pkt_size);
4910 memcpy(packet, bp->dev->dev_addr, 6);
4911 memset(packet + 6, 0x0, 8);
4912 for (i = 14; i < pkt_size; i++)
4913 packet[i] = (unsigned char) (i & 0xff);
4915 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4918 REG_WR(bp, BNX2_HC_COMMAND,
4919 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4921 REG_RD(bp, BNX2_HC_COMMAND);
4924 rx_start_idx = bnx2_get_hw_rx_cons(bp);
4928 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4930 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4931 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4932 txbd->tx_bd_mss_nbytes = pkt_size;
4933 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4936 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4937 bp->tx_prod_bseq += pkt_size;
4939 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4940 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4944 REG_WR(bp, BNX2_HC_COMMAND,
4945 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4947 REG_RD(bp, BNX2_HC_COMMAND);
4951 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4954 if (bnx2_get_hw_tx_cons(bp) != bp->tx_prod)
4955 goto loopback_test_done;
4957 rx_idx = bnx2_get_hw_rx_cons(bp);
4958 if (rx_idx != rx_start_idx + num_pkts) {
4959 goto loopback_test_done;
4962 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4963 rx_skb = rx_buf->skb;
4965 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4966 skb_reserve(rx_skb, bp->rx_offset);
4968 pci_dma_sync_single_for_cpu(bp->pdev,
4969 pci_unmap_addr(rx_buf, mapping),
4970 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4972 if (rx_hdr->l2_fhdr_status &
4973 (L2_FHDR_ERRORS_BAD_CRC |
4974 L2_FHDR_ERRORS_PHY_DECODE |
4975 L2_FHDR_ERRORS_ALIGNMENT |
4976 L2_FHDR_ERRORS_TOO_SHORT |
4977 L2_FHDR_ERRORS_GIANT_FRAME)) {
4979 goto loopback_test_done;
4982 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4983 goto loopback_test_done;
4986 for (i = 14; i < pkt_size; i++) {
4987 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4988 goto loopback_test_done;
4999 #define BNX2_MAC_LOOPBACK_FAILED 1
5000 #define BNX2_PHY_LOOPBACK_FAILED 2
5001 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5002 BNX2_PHY_LOOPBACK_FAILED)
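/* bnx2_test_loopback() below returns a bitmask rather than an errno:
 * BNX2_MAC_LOOPBACK_FAILED (bit 0) means no frame made it around the
 * internal MAC path, BNX2_PHY_LOOPBACK_FAILED (bit 1) the same for the
 * PHY path, and BNX2_LOOPBACK_FAILED is both bits at once, which is
 * also what a non-running interface reports.
 */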
5005 bnx2_test_loopback(struct bnx2 *bp)
5009 if (!netif_running(bp->dev))
5010 return BNX2_LOOPBACK_FAILED;
5012 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5013 spin_lock_bh(&bp->phy_lock);
5015 spin_unlock_bh(&bp->phy_lock);
5016 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5017 rc |= BNX2_MAC_LOOPBACK_FAILED;
5018 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5019 rc |= BNX2_PHY_LOOPBACK_FAILED;
5023 #define NVRAM_SIZE 0x200
5024 #define CRC32_RESIDUAL 0xdebb20e3
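/* Background on CRC32_RESIDUAL: when a block that already carries its
 * own little-endian CRC-32 at the end is pushed through the CRC once
 * more, an intact block always yields the constant residual
 * 0xdebb20e3, so the stored checksum never has to be extracted and
 * compared byte by byte.  A minimal sketch of such a check over one
 * 0x100-byte NVRAM block (layout assumed for illustration only):
 */
static inline int bnx2_nvram_block_intact(const u8 *block)
{
	/* 0x100 bytes = data plus its trailing CRC-32 */
	return ether_crc_le(0x100, block) == CRC32_RESIDUAL;
}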
5027 bnx2_test_nvram(struct bnx2 *bp)
5029 u32 buf[NVRAM_SIZE / 4];
5030 u8 *data = (u8 *) buf;
5034 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5035 goto test_nvram_done;
5037 magic = be32_to_cpu(buf[0]);
5038 if (magic != 0x669955aa) {
5040 goto test_nvram_done;
5043 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5044 goto test_nvram_done;
5046 csum = ether_crc_le(0x100, data);
5047 if (csum != CRC32_RESIDUAL) {
5049 goto test_nvram_done;
5052 csum = ether_crc_le(0x100, data + 0x100);
5053 if (csum != CRC32_RESIDUAL) {
5062 bnx2_test_link(struct bnx2 *bp)
5066 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5071 spin_lock_bh(&bp->phy_lock);
5072 bnx2_enable_bmsr1(bp);
5073 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5074 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5075 bnx2_disable_bmsr1(bp);
5076 spin_unlock_bh(&bp->phy_lock);
5078 if (bmsr & BMSR_LSTATUS) {
5085 bnx2_test_intr(struct bnx2 *bp)
5090 if (!netif_running(bp->dev))
5093 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5095 /* This register is not touched during run-time. */
5096 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5097 REG_RD(bp, BNX2_HC_COMMAND);
5099 for (i = 0; i < 10; i++) {
5100 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5106 msleep_interruptible(10);
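/* Ten 10 ms polls give the chip roughly 100 ms to advance the status
 * block index; if it never moves, no interrupt (MSI or INTx) was
 * delivered and the caller falls back or fails the test.
 */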
5115 bnx2_5706_serdes_timer(struct bnx2 *bp)
5117 spin_lock(&bp->phy_lock);
5118 if (bp->serdes_an_pending)
5119 bp->serdes_an_pending--;
5120 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5123 bp->current_interval = bp->timer_interval;
5125 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5127 if (bmcr & BMCR_ANENABLE) {
5130 bnx2_write_phy(bp, 0x1c, 0x7c00);
5131 bnx2_read_phy(bp, 0x1c, &phy1);
5133 bnx2_write_phy(bp, 0x17, 0x0f01);
5134 bnx2_read_phy(bp, 0x15, &phy2);
5135 bnx2_write_phy(bp, 0x17, 0x0f01);
5136 bnx2_read_phy(bp, 0x15, &phy2);
5138 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5139 !(phy2 & 0x20)) { /* no CONFIG */
5141 bmcr &= ~BMCR_ANENABLE;
5142 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5143 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5144 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5148 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5149 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5152 bnx2_write_phy(bp, 0x17, 0x0f01);
5153 bnx2_read_phy(bp, 0x15, &phy2);
5157 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5158 bmcr |= BMCR_ANENABLE;
5159 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5161 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5164 bp->current_interval = bp->timer_interval;
5166 spin_unlock(&bp->phy_lock);
5170 bnx2_5708_serdes_timer(struct bnx2 *bp)
5172 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5175 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5176 bp->serdes_an_pending = 0;
5180 spin_lock(&bp->phy_lock);
5181 if (bp->serdes_an_pending)
5182 bp->serdes_an_pending--;
5183 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5186 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5187 if (bmcr & BMCR_ANENABLE) {
5188 bnx2_enable_forced_2g5(bp);
5189 bp->current_interval = SERDES_FORCED_TIMEOUT;
5191 bnx2_disable_forced_2g5(bp);
5192 bp->serdes_an_pending = 2;
5193 bp->current_interval = bp->timer_interval;
5197 bp->current_interval = bp->timer_interval;
5199 spin_unlock(&bp->phy_lock);
5203 bnx2_timer(unsigned long data)
5205 struct bnx2 *bp = (struct bnx2 *) data;
5207 if (!netif_running(bp->dev))
5210 if (atomic_read(&bp->intr_sem) != 0)
5211 goto bnx2_restart_timer;
5213 bnx2_send_heart_beat(bp);
5215 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
5217 /* work around occasional corrupted counters */
5218 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5219 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5220 BNX2_HC_COMMAND_STATS_NOW);
5222 if (bp->phy_flags & PHY_SERDES_FLAG) {
5223 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5224 bnx2_5706_serdes_timer(bp);
5226 bnx2_5708_serdes_timer(bp);
5230 mod_timer(&bp->timer, jiffies + bp->current_interval);
5234 bnx2_request_irq(struct bnx2 *bp)
5236 struct net_device *dev = bp->dev;
5237 unsigned long flags;
5238 struct bnx2_irq *irq = &bp->irq_tbl[0];
5241 if (bp->flags & USING_MSI_FLAG)
5244 flags = IRQF_SHARED;
5245 rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
5250 bnx2_free_irq(struct bnx2 *bp)
5252 struct net_device *dev = bp->dev;
5254 free_irq(bp->irq_tbl[0].vector, dev);
5255 if (bp->flags & USING_MSI_FLAG) {
5256 pci_disable_msi(bp->pdev);
5257 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5262 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5264 bp->irq_tbl[0].handler = bnx2_interrupt;
5265 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5267 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5268 if (pci_enable_msi(bp->pdev) == 0) {
5269 bp->flags |= USING_MSI_FLAG;
5270 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5271 bp->flags |= ONE_SHOT_MSI_FLAG;
5272 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5274 bp->irq_tbl[0].handler = bnx2_msi;
5278 bp->irq_tbl[0].vector = bp->pdev->irq;
5281 /* Called with rtnl_lock */
5283 bnx2_open(struct net_device *dev)
5285 struct bnx2 *bp = netdev_priv(dev);
5288 netif_carrier_off(dev);
5290 bnx2_set_power_state(bp, PCI_D0);
5291 bnx2_disable_int(bp);
5293 rc = bnx2_alloc_mem(bp);
5297 bnx2_setup_int_mode(bp, disable_msi);
5298 napi_enable(&bp->napi);
5299 rc = bnx2_request_irq(bp);
5302 napi_disable(&bp->napi);
5307 rc = bnx2_init_nic(bp);
5310 napi_disable(&bp->napi);
5317 mod_timer(&bp->timer, jiffies + bp->current_interval);
5319 atomic_set(&bp->intr_sem, 0);
5321 bnx2_enable_int(bp);
5323 if (bp->flags & USING_MSI_FLAG) {
5324 /* Test MSI to make sure it is working.
5325 * If MSI test fails, go back to INTx mode.
5327 if (bnx2_test_intr(bp) != 0) {
5328 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5329 " using MSI, switching to INTx mode. Please"
5330 " report this failure to the PCI maintainer"
5331 " and include system chipset information.\n",
5334 bnx2_disable_int(bp);
5337 bnx2_setup_int_mode(bp, 1);
5339 rc = bnx2_init_nic(bp);
5342 rc = bnx2_request_irq(bp);
5345 napi_disable(&bp->napi);
5348 del_timer_sync(&bp->timer);
5351 bnx2_enable_int(bp);
5354 if (bp->flags & USING_MSI_FLAG) {
5355 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5358 netif_start_queue(dev);
5364 bnx2_reset_task(struct work_struct *work)
5366 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5368 if (!netif_running(bp->dev))
5371 bp->in_reset_task = 1;
5372 bnx2_netif_stop(bp);
5376 atomic_set(&bp->intr_sem, 1);
5377 bnx2_netif_start(bp);
5378 bp->in_reset_task = 0;
5382 bnx2_tx_timeout(struct net_device *dev)
5384 struct bnx2 *bp = netdev_priv(dev);
5386 /* This allows the netif to be shut down gracefully before resetting */
5387 schedule_work(&bp->reset_task);
5391 /* Called with rtnl_lock */
5393 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5395 struct bnx2 *bp = netdev_priv(dev);
5397 bnx2_netif_stop(bp);
5400 bnx2_set_rx_mode(dev);
5402 bnx2_netif_start(bp);
5406 /* Called with netif_tx_lock.
5407 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5408 * netif_wake_queue().
5411 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5413 struct bnx2 *bp = netdev_priv(dev);
5416 struct sw_bd *tx_buf;
5417 u32 len, vlan_tag_flags, last_frag, mss;
5418 u16 prod, ring_prod;
5421 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
5422 netif_stop_queue(dev);
5423 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5426 return NETDEV_TX_BUSY;
5428 len = skb_headlen(skb);
5430 ring_prod = TX_RING_IDX(prod);
5433 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5434 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5437 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5439 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5441 if ((mss = skb_shinfo(skb)->gso_size)) {
5442 u32 tcp_opt_len, ip_tcp_len;
5445 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5447 tcp_opt_len = tcp_optlen(skb);
5449 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5450 u32 tcp_off = skb_transport_offset(skb) -
5451 sizeof(struct ipv6hdr) - ETH_HLEN;
5453 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5454 TX_BD_FLAGS_SW_FLAGS;
5455 if (likely(tcp_off == 0))
5456 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5459 vlan_tag_flags |= ((tcp_off & 0x3) <<
5460 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5461 ((tcp_off & 0x10) <<
5462 TX_BD_FLAGS_TCP6_OFF4_SHL);
5463 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
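/* The IPv6 TCP header offset is too wide for any single descriptor
 * field, so (after scaling on an elided line above) its bits are
 * scattered: the low two bits and bit 4 go into the vlan/flags word,
 * bits 2-3 into the mss word.  The hardware reassembles them to locate
 * the start of the TCP header.
 */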
5466 if (skb_header_cloned(skb) &&
5467 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5469 return NETDEV_TX_OK;
5472 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5476 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5477 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5481 if (tcp_opt_len || (iph->ihl > 5)) {
5482 vlan_tag_flags |= ((iph->ihl - 5) +
5483 (tcp_opt_len >> 2)) << 8;
5489 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5491 tx_buf = &bp->tx_buf_ring[ring_prod];
5493 pci_unmap_addr_set(tx_buf, mapping, mapping);
5495 txbd = &bp->tx_desc_ring[ring_prod];
5497 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5498 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5499 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5500 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5502 last_frag = skb_shinfo(skb)->nr_frags;
5504 for (i = 0; i < last_frag; i++) {
5505 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5507 prod = NEXT_TX_BD(prod);
5508 ring_prod = TX_RING_IDX(prod);
5509 txbd = &bp->tx_desc_ring[ring_prod];
5512 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5513 len, PCI_DMA_TODEVICE);
5514 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5517 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5518 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5519 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5520 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5523 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5525 prod = NEXT_TX_BD(prod);
5526 bp->tx_prod_bseq += skb->len;
5528 REG_WR16(bp, bp->tx_bidx_addr, prod);
5529 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5534 dev->trans_start = jiffies;
5536 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
5537 netif_stop_queue(dev);
5538 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
5539 netif_wake_queue(dev);
5542 return NETDEV_TX_OK;
5545 /* Called with rtnl_lock */
5547 bnx2_close(struct net_device *dev)
5549 struct bnx2 *bp = netdev_priv(dev);
5552 /* Calling flush_scheduled_work() may deadlock because
5553 * linkwatch_event() may be on the workqueue and it will try to get
5554 * the rtnl_lock which we are holding.
5556 while (bp->in_reset_task)
5559 bnx2_disable_int_sync(bp);
5560 napi_disable(&bp->napi);
5561 del_timer_sync(&bp->timer);
5562 if (bp->flags & NO_WOL_FLAG)
5563 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5565 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5567 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5568 bnx2_reset_chip(bp, reset_code);
5573 netif_carrier_off(bp->dev);
5574 bnx2_set_power_state(bp, PCI_D3hot);
5578 #define GET_NET_STATS64(ctr) \
5579 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5580 (unsigned long) (ctr##_lo)
5582 #define GET_NET_STATS32(ctr) \
5585 #if (BITS_PER_LONG == 64)
5586 #define GET_NET_STATS GET_NET_STATS64
5588 #define GET_NET_STATS GET_NET_STATS32
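/* Worked example (illustrative): each 64-bit hardware counter is
 * exported as a _hi/_lo pair of 32-bit words.  With
 * stat_IfHCInOctets_hi = 0x00000001 and stat_IfHCInOctets_lo = 0x5,
 * GET_NET_STATS64 yields 0x100000005 (4294967301 octets) on a 64-bit
 * kernel, while GET_NET_STATS32 can only keep the low word, since
 * unsigned long is 32 bits wide there.
 */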
5591 static struct net_device_stats *
5592 bnx2_get_stats(struct net_device *dev)
5594 struct bnx2 *bp = netdev_priv(dev);
5595 struct statistics_block *stats_blk = bp->stats_blk;
5596 struct net_device_stats *net_stats = &bp->net_stats;
5598 if (bp->stats_blk == NULL) {
5601 net_stats->rx_packets =
5602 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5603 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5604 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5606 net_stats->tx_packets =
5607 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5608 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5609 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5611 net_stats->rx_bytes =
5612 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5614 net_stats->tx_bytes =
5615 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5617 net_stats->multicast =
5618 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5620 net_stats->collisions =
5621 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5623 net_stats->rx_length_errors =
5624 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5625 stats_blk->stat_EtherStatsOverrsizePkts);
5627 net_stats->rx_over_errors =
5628 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5630 net_stats->rx_frame_errors =
5631 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5633 net_stats->rx_crc_errors =
5634 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5636 net_stats->rx_errors = net_stats->rx_length_errors +
5637 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5638 net_stats->rx_crc_errors;
5640 net_stats->tx_aborted_errors =
5641 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5642 stats_blk->stat_Dot3StatsLateCollisions);
5644 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5645 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5646 net_stats->tx_carrier_errors = 0;
5648 net_stats->tx_carrier_errors =
5650 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5653 net_stats->tx_errors =
5655 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5657 net_stats->tx_aborted_errors +
5658 net_stats->tx_carrier_errors;
5660 net_stats->rx_missed_errors =
5661 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5662 stats_blk->stat_FwRxDrop);
5667 /* All ethtool functions called with rtnl_lock */
5670 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5672 struct bnx2 *bp = netdev_priv(dev);
5673 int support_serdes = 0, support_copper = 0;
5675 cmd->supported = SUPPORTED_Autoneg;
5676 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5679 } else if (bp->phy_port == PORT_FIBRE)
5684 if (support_serdes) {
5685 cmd->supported |= SUPPORTED_1000baseT_Full |
5687 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5688 cmd->supported |= SUPPORTED_2500baseX_Full;
5691 if (support_copper) {
5692 cmd->supported |= SUPPORTED_10baseT_Half |
5693 SUPPORTED_10baseT_Full |
5694 SUPPORTED_100baseT_Half |
5695 SUPPORTED_100baseT_Full |
5696 SUPPORTED_1000baseT_Full |
5701 spin_lock_bh(&bp->phy_lock);
5702 cmd->port = bp->phy_port;
5703 cmd->advertising = bp->advertising;
5705 if (bp->autoneg & AUTONEG_SPEED) {
5706 cmd->autoneg = AUTONEG_ENABLE;
5709 cmd->autoneg = AUTONEG_DISABLE;
5712 if (netif_carrier_ok(dev)) {
5713 cmd->speed = bp->line_speed;
5714 cmd->duplex = bp->duplex;
5720 spin_unlock_bh(&bp->phy_lock);
5722 cmd->transceiver = XCVR_INTERNAL;
5723 cmd->phy_address = bp->phy_addr;
5729 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5731 struct bnx2 *bp = netdev_priv(dev);
5732 u8 autoneg = bp->autoneg;
5733 u8 req_duplex = bp->req_duplex;
5734 u16 req_line_speed = bp->req_line_speed;
5735 u32 advertising = bp->advertising;
5738 spin_lock_bh(&bp->phy_lock);
5740 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5741 goto err_out_unlock;
5743 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5744 goto err_out_unlock;
5746 if (cmd->autoneg == AUTONEG_ENABLE) {
5747 autoneg |= AUTONEG_SPEED;
5749 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5751 /* allow advertising 1 speed */
5752 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5753 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5754 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5755 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5757 if (cmd->port == PORT_FIBRE)
5758 goto err_out_unlock;
5760 advertising = cmd->advertising;
5762 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5763 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5764 (cmd->port == PORT_TP))
5765 goto err_out_unlock;
5766 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
5767 advertising = cmd->advertising;
5768 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5769 goto err_out_unlock;
5771 if (cmd->port == PORT_FIBRE)
5772 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5774 advertising = ETHTOOL_ALL_COPPER_SPEED;
5776 advertising |= ADVERTISED_Autoneg;
5779 if (cmd->port == PORT_FIBRE) {
5780 if ((cmd->speed != SPEED_1000 &&
5781 cmd->speed != SPEED_2500) ||
5782 (cmd->duplex != DUPLEX_FULL))
5783 goto err_out_unlock;
5785 if (cmd->speed == SPEED_2500 &&
5786 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5787 goto err_out_unlock;
5789 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5790 goto err_out_unlock;
5792 autoneg &= ~AUTONEG_SPEED;
5793 req_line_speed = cmd->speed;
5794 req_duplex = cmd->duplex;
5798 bp->autoneg = autoneg;
5799 bp->advertising = advertising;
5800 bp->req_line_speed = req_line_speed;
5801 bp->req_duplex = req_duplex;
5803 err = bnx2_setup_phy(bp, cmd->port);
5806 spin_unlock_bh(&bp->phy_lock);
5812 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5814 struct bnx2 *bp = netdev_priv(dev);
5816 strcpy(info->driver, DRV_MODULE_NAME);
5817 strcpy(info->version, DRV_MODULE_VERSION);
5818 strcpy(info->bus_info, pci_name(bp->pdev));
5819 strcpy(info->fw_version, bp->fw_version);
5822 #define BNX2_REGDUMP_LEN (32 * 1024)
5825 bnx2_get_regs_len(struct net_device *dev)
5827 return BNX2_REGDUMP_LEN;
5831 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5833 u32 *p = _p, i, offset;
5835 struct bnx2 *bp = netdev_priv(dev);
5836 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5837 0x0800, 0x0880, 0x0c00, 0x0c10,
5838 0x0c30, 0x0d08, 0x1000, 0x101c,
5839 0x1040, 0x1048, 0x1080, 0x10a4,
5840 0x1400, 0x1490, 0x1498, 0x14f0,
5841 0x1500, 0x155c, 0x1580, 0x15dc,
5842 0x1600, 0x1658, 0x1680, 0x16d8,
5843 0x1800, 0x1820, 0x1840, 0x1854,
5844 0x1880, 0x1894, 0x1900, 0x1984,
5845 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5846 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5847 0x2000, 0x2030, 0x23c0, 0x2400,
5848 0x2800, 0x2820, 0x2830, 0x2850,
5849 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5850 0x3c00, 0x3c94, 0x4000, 0x4010,
5851 0x4080, 0x4090, 0x43c0, 0x4458,
5852 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5853 0x4fc0, 0x5010, 0x53c0, 0x5444,
5854 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5855 0x5fc0, 0x6000, 0x6400, 0x6428,
5856 0x6800, 0x6848, 0x684c, 0x6860,
5857 0x6888, 0x6910, 0x8000 };
5861 memset(p, 0, BNX2_REGDUMP_LEN);
5863 if (!netif_running(bp->dev))
5867 offset = reg_boundaries[0];
5869 while (offset < BNX2_REGDUMP_LEN) {
5870 *p++ = REG_RD(bp, offset);
5872 if (offset == reg_boundaries[i + 1]) {
5873 offset = reg_boundaries[i + 2];
5874 p = (u32 *) (orig_p + offset);
5881 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5883 struct bnx2 *bp = netdev_priv(dev);
5885 if (bp->flags & NO_WOL_FLAG) {
5890 wol->supported = WAKE_MAGIC;
5892 wol->wolopts = WAKE_MAGIC;
5896 memset(&wol->sopass, 0, sizeof(wol->sopass));
5900 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5902 struct bnx2 *bp = netdev_priv(dev);
5904 if (wol->wolopts & ~WAKE_MAGIC)
5907 if (wol->wolopts & WAKE_MAGIC) {
5908 if (bp->flags & NO_WOL_FLAG)
5920 bnx2_nway_reset(struct net_device *dev)
5922 struct bnx2 *bp = netdev_priv(dev);
5925 if (!(bp->autoneg & AUTONEG_SPEED)) {
5929 spin_lock_bh(&bp->phy_lock);
5931 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5934 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5935 spin_unlock_bh(&bp->phy_lock);
5939 /* Force a link down visible on the other side */
5940 if (bp->phy_flags & PHY_SERDES_FLAG) {
5941 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5942 spin_unlock_bh(&bp->phy_lock);
5946 spin_lock_bh(&bp->phy_lock);
5948 bp->current_interval = SERDES_AN_TIMEOUT;
5949 bp->serdes_an_pending = 1;
5950 mod_timer(&bp->timer, jiffies + bp->current_interval);
5953 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5954 bmcr &= ~BMCR_LOOPBACK;
5955 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5957 spin_unlock_bh(&bp->phy_lock);
5963 bnx2_get_eeprom_len(struct net_device *dev)
5965 struct bnx2 *bp = netdev_priv(dev);
5967 if (bp->flash_info == NULL)
5970 return (int) bp->flash_size;
5974 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5977 struct bnx2 *bp = netdev_priv(dev);
5980 /* parameters already validated in ethtool_get_eeprom */
5982 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5988 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5991 struct bnx2 *bp = netdev_priv(dev);
5994 /* parameters already validated in ethtool_set_eeprom */
5996 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6002 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6004 struct bnx2 *bp = netdev_priv(dev);
6006 memset(coal, 0, sizeof(struct ethtool_coalesce));
6008 coal->rx_coalesce_usecs = bp->rx_ticks;
6009 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6010 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6011 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6013 coal->tx_coalesce_usecs = bp->tx_ticks;
6014 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6015 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6016 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6018 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6024 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6026 struct bnx2 *bp = netdev_priv(dev);
6028 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6029 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6031 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6032 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6034 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6035 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6037 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6038 if (bp->rx_quick_cons_trip_int > 0xff)
6039 bp->rx_quick_cons_trip_int = 0xff;
6041 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6042 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6044 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6045 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6047 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6048 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6050 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6051 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6054 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6055 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6056 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6057 bp->stats_ticks = USEC_PER_SEC;
6059 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6060 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6061 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
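/* BNX2_HC_STATS_TICKS_HC_STAT_TICKS doubles as clamp and mask here:
 * assuming the usual contiguous bit-field layout of the register
 * field, the final &= rounds stats_ticks down to the granularity the
 * statistics coalescing block can actually be programmed with.
 */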
6063 if (netif_running(bp->dev)) {
6064 bnx2_netif_stop(bp);
6066 bnx2_netif_start(bp);
6073 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6075 struct bnx2 *bp = netdev_priv(dev);
6077 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6078 ering->rx_mini_max_pending = 0;
6079 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6081 ering->rx_pending = bp->rx_ring_size;
6082 ering->rx_mini_pending = 0;
6083 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6085 ering->tx_max_pending = MAX_TX_DESC_CNT;
6086 ering->tx_pending = bp->tx_ring_size;
6090 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6092 if (netif_running(bp->dev)) {
6093 bnx2_netif_stop(bp);
6094 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6099 bnx2_set_rx_ring_size(bp, rx);
6100 bp->tx_ring_size = tx;
6102 if (netif_running(bp->dev)) {
6105 rc = bnx2_alloc_mem(bp);
6109 bnx2_netif_start(bp);
6115 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6117 struct bnx2 *bp = netdev_priv(dev);
6120 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6121 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6122 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6126 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6131 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6133 struct bnx2 *bp = netdev_priv(dev);
6135 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6136 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6137 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6141 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6143 struct bnx2 *bp = netdev_priv(dev);
6145 bp->req_flow_ctrl = 0;
6146 if (epause->rx_pause)
6147 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6148 if (epause->tx_pause)
6149 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6151 if (epause->autoneg) {
6152 bp->autoneg |= AUTONEG_FLOW_CTRL;
6155 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6158 spin_lock_bh(&bp->phy_lock);
6160 bnx2_setup_phy(bp, bp->phy_port);
6162 spin_unlock_bh(&bp->phy_lock);
6168 bnx2_get_rx_csum(struct net_device *dev)
6170 struct bnx2 *bp = netdev_priv(dev);
6176 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6178 struct bnx2 *bp = netdev_priv(dev);
6185 bnx2_set_tso(struct net_device *dev, u32 data)
6187 struct bnx2 *bp = netdev_priv(dev);
6190 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6191 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6192 dev->features |= NETIF_F_TSO6;
6194 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6199 #define BNX2_NUM_STATS 46
6202 char string[ETH_GSTRING_LEN];
6203 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6205 { "rx_error_bytes" },
6207 { "tx_error_bytes" },
6208 { "rx_ucast_packets" },
6209 { "rx_mcast_packets" },
6210 { "rx_bcast_packets" },
6211 { "tx_ucast_packets" },
6212 { "tx_mcast_packets" },
6213 { "tx_bcast_packets" },
6214 { "tx_mac_errors" },
6215 { "tx_carrier_errors" },
6216 { "rx_crc_errors" },
6217 { "rx_align_errors" },
6218 { "tx_single_collisions" },
6219 { "tx_multi_collisions" },
6221 { "tx_excess_collisions" },
6222 { "tx_late_collisions" },
6223 { "tx_total_collisions" },
6226 { "rx_undersize_packets" },
6227 { "rx_oversize_packets" },
6228 { "rx_64_byte_packets" },
6229 { "rx_65_to_127_byte_packets" },
6230 { "rx_128_to_255_byte_packets" },
6231 { "rx_256_to_511_byte_packets" },
6232 { "rx_512_to_1023_byte_packets" },
6233 { "rx_1024_to_1522_byte_packets" },
6234 { "rx_1523_to_9022_byte_packets" },
6235 { "tx_64_byte_packets" },
6236 { "tx_65_to_127_byte_packets" },
6237 { "tx_128_to_255_byte_packets" },
6238 { "tx_256_to_511_byte_packets" },
6239 { "tx_512_to_1023_byte_packets" },
6240 { "tx_1024_to_1522_byte_packets" },
6241 { "tx_1523_to_9022_byte_packets" },
6242 { "rx_xon_frames" },
6243 { "rx_xoff_frames" },
6244 { "tx_xon_frames" },
6245 { "tx_xoff_frames" },
6246 { "rx_mac_ctrl_frames" },
6247 { "rx_filtered_packets" },
6249 { "rx_fw_discards" },
6252 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
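/* Example: STATS_OFFSET32 converts a byte offset within
 * struct statistics_block into an index into the u32 view used by
 * bnx2_get_ethtool_stats(), so a field at byte offset 0x28 becomes
 * hw_stats[10].
 */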
6254 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6255 STATS_OFFSET32(stat_IfHCInOctets_hi),
6256 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6257 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6258 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6259 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6260 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6261 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6262 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6263 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6264 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6265 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6266 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6267 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6268 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6269 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6270 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6271 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6272 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6273 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6274 STATS_OFFSET32(stat_EtherStatsCollisions),
6275 STATS_OFFSET32(stat_EtherStatsFragments),
6276 STATS_OFFSET32(stat_EtherStatsJabbers),
6277 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6278 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6279 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6280 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6281 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6282 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6283 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6284 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6285 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6286 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6287 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6288 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6289 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6290 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6291 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6292 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6293 STATS_OFFSET32(stat_XonPauseFramesReceived),
6294 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6295 STATS_OFFSET32(stat_OutXonSent),
6296 STATS_OFFSET32(stat_OutXoffSent),
6297 STATS_OFFSET32(stat_MacControlFramesReceived),
6298 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6299 STATS_OFFSET32(stat_IfInMBUFDiscards),
6300 STATS_OFFSET32(stat_FwRxDrop),
6303 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6304 * skipped because of errata.
6306 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6307 8,0,8,8,8,8,8,8,8,8,
6308 4,0,4,4,4,4,4,4,4,4,
6309 4,4,4,4,4,4,4,4,4,4,
6310 4,4,4,4,4,4,4,4,4,4,
6314 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6315 8,0,8,8,8,8,8,8,8,8,
6316 4,4,4,4,4,4,4,4,4,4,
6317 4,4,4,4,4,4,4,4,4,4,
6318 4,4,4,4,4,4,4,4,4,4,
6322 #define BNX2_NUM_TESTS 6
6325 char string[ETH_GSTRING_LEN];
6326 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6327 { "register_test (offline)" },
6328 { "memory_test (offline)" },
6329 { "loopback_test (offline)" },
6330 { "nvram_test (online)" },
6331 { "interrupt_test (online)" },
6332 { "link_test (online)" },
6336 bnx2_get_sset_count(struct net_device *dev, int sset)
6340 return BNX2_NUM_TESTS;
6342 return BNX2_NUM_STATS;
6349 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6351 struct bnx2 *bp = netdev_priv(dev);
6353 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6354 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6357 bnx2_netif_stop(bp);
6358 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6361 if (bnx2_test_registers(bp) != 0) {
6363 etest->flags |= ETH_TEST_FL_FAILED;
6365 if (bnx2_test_memory(bp) != 0) {
6367 etest->flags |= ETH_TEST_FL_FAILED;
6369 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6370 etest->flags |= ETH_TEST_FL_FAILED;
6372 if (!netif_running(bp->dev)) {
6373 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6377 bnx2_netif_start(bp);
6380 /* wait for link up */
6381 for (i = 0; i < 7; i++) {
6384 msleep_interruptible(1000);
6388 if (bnx2_test_nvram(bp) != 0) {
6390 etest->flags |= ETH_TEST_FL_FAILED;
6392 if (bnx2_test_intr(bp) != 0) {
6394 etest->flags |= ETH_TEST_FL_FAILED;
6397 if (bnx2_test_link(bp) != 0) {
6399 etest->flags |= ETH_TEST_FL_FAILED;
6405 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6407 switch (stringset) {
6409 memcpy(buf, bnx2_stats_str_arr,
6410 sizeof(bnx2_stats_str_arr));
6413 memcpy(buf, bnx2_tests_str_arr,
6414 sizeof(bnx2_tests_str_arr));
6420 bnx2_get_ethtool_stats(struct net_device *dev,
6421 struct ethtool_stats *stats, u64 *buf)
6423 struct bnx2 *bp = netdev_priv(dev);
6425 u32 *hw_stats = (u32 *) bp->stats_blk;
6426 u8 *stats_len_arr = NULL;
6428 if (hw_stats == NULL) {
6429 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6433 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6434 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6435 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6436 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6437 stats_len_arr = bnx2_5706_stats_len_arr;
6439 stats_len_arr = bnx2_5708_stats_len_arr;
6441 for (i = 0; i < BNX2_NUM_STATS; i++) {
6442 if (stats_len_arr[i] == 0) {
6443 /* skip this counter */
6447 if (stats_len_arr[i] == 4) {
6448 /* 4-byte counter */
6450 *(hw_stats + bnx2_stats_offset_arr[i]);
6453 /* 8-byte counter */
6454 buf[i] = (((u64) *(hw_stats +
6455 bnx2_stats_offset_arr[i])) << 32) +
6456 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6461 bnx2_phys_id(struct net_device *dev, u32 data)
6463 struct bnx2 *bp = netdev_priv(dev);
6470 save = REG_RD(bp, BNX2_MISC_CFG);
6471 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6473 for (i = 0; i < (data * 2); i++) {
6475 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6478 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6479 BNX2_EMAC_LED_1000MB_OVERRIDE |
6480 BNX2_EMAC_LED_100MB_OVERRIDE |
6481 BNX2_EMAC_LED_10MB_OVERRIDE |
6482 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6483 BNX2_EMAC_LED_TRAFFIC);
6485 msleep_interruptible(500);
6486 if (signal_pending(current))
6489 REG_WR(bp, BNX2_EMAC_LED, 0);
6490 REG_WR(bp, BNX2_MISC_CFG, save);
6495 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6497 struct bnx2 *bp = netdev_priv(dev);
6499 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6500 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6502 return (ethtool_op_set_tx_csum(dev, data));
6505 static const struct ethtool_ops bnx2_ethtool_ops = {
6506 .get_settings = bnx2_get_settings,
6507 .set_settings = bnx2_set_settings,
6508 .get_drvinfo = bnx2_get_drvinfo,
6509 .get_regs_len = bnx2_get_regs_len,
6510 .get_regs = bnx2_get_regs,
6511 .get_wol = bnx2_get_wol,
6512 .set_wol = bnx2_set_wol,
6513 .nway_reset = bnx2_nway_reset,
6514 .get_link = ethtool_op_get_link,
6515 .get_eeprom_len = bnx2_get_eeprom_len,
6516 .get_eeprom = bnx2_get_eeprom,
6517 .set_eeprom = bnx2_set_eeprom,
6518 .get_coalesce = bnx2_get_coalesce,
6519 .set_coalesce = bnx2_set_coalesce,
6520 .get_ringparam = bnx2_get_ringparam,
6521 .set_ringparam = bnx2_set_ringparam,
6522 .get_pauseparam = bnx2_get_pauseparam,
6523 .set_pauseparam = bnx2_set_pauseparam,
6524 .get_rx_csum = bnx2_get_rx_csum,
6525 .set_rx_csum = bnx2_set_rx_csum,
6526 .set_tx_csum = bnx2_set_tx_csum,
6527 .set_sg = ethtool_op_set_sg,
6528 .set_tso = bnx2_set_tso,
6529 .self_test = bnx2_self_test,
6530 .get_strings = bnx2_get_strings,
6531 .phys_id = bnx2_phys_id,
6532 .get_ethtool_stats = bnx2_get_ethtool_stats,
6533 .get_sset_count = bnx2_get_sset_count,
6536 /* Called with rtnl_lock */
6538 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6540 struct mii_ioctl_data *data = if_mii(ifr);
6541 struct bnx2 *bp = netdev_priv(dev);
6546 data->phy_id = bp->phy_addr;
6552 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6555 if (!netif_running(dev))
6558 spin_lock_bh(&bp->phy_lock);
6559 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6560 spin_unlock_bh(&bp->phy_lock);
6562 data->val_out = mii_regval;
6568 if (!capable(CAP_NET_ADMIN))
6571 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6574 if (!netif_running(dev))
6577 spin_lock_bh(&bp->phy_lock);
6578 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6579 spin_unlock_bh(&bp->phy_lock);
6590 /* Called with rtnl_lock */
6592 bnx2_change_mac_addr(struct net_device *dev, void *p)
6594 struct sockaddr *addr = p;
6595 struct bnx2 *bp = netdev_priv(dev);
6597 if (!is_valid_ether_addr(addr->sa_data))
6600 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6601 if (netif_running(dev))
6602 bnx2_set_mac_addr(bp);
6607 /* Called with rtnl_lock */
6609 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6611 struct bnx2 *bp = netdev_priv(dev);
6613 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6614 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6618 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6621 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6623 poll_bnx2(struct net_device *dev)
6625 struct bnx2 *bp = netdev_priv(dev);
6627 disable_irq(bp->pdev->irq);
6628 bnx2_interrupt(bp->pdev->irq, dev);
6629 enable_irq(bp->pdev->irq);
6633 static void __devinit
6634 bnx2_get_5709_media(struct bnx2 *bp)
6636 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6637 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6640 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6642 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6643 bp->phy_flags |= PHY_SERDES_FLAG;
6647 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6648 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6650 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6652 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6657 bp->phy_flags |= PHY_SERDES_FLAG;
6665 bp->phy_flags |= PHY_SERDES_FLAG;
6671 static void __devinit
6672 bnx2_get_pci_speed(struct bnx2 *bp)
6676 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6677 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6680 bp->flags |= PCIX_FLAG;
6682 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6684 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6686 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6687 bp->bus_speed_mhz = 133;
6690 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6691 bp->bus_speed_mhz = 100;
6694 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6695 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6696 bp->bus_speed_mhz = 66;
6699 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6700 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6701 bp->bus_speed_mhz = 50;
6704 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6705 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6706 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6707 bp->bus_speed_mhz = 33;
6712 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6713 bp->bus_speed_mhz = 66;
6715 bp->bus_speed_mhz = 33;
6718 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6719 bp->flags |= PCI_32BIT_FLAG;
6723 static int __devinit
6724 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6727 unsigned long mem_len;
6730 u64 dma_mask, persist_dma_mask;
6732 SET_NETDEV_DEV(dev, &pdev->dev);
6733 bp = netdev_priv(dev);
6738 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6739 rc = pci_enable_device(pdev);
6741 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6745 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6747 "Cannot find PCI device base address, aborting.\n");
6749 goto err_out_disable;
6752 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6754 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6755 goto err_out_disable;
6758 pci_set_master(pdev);
6760 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6761 if (bp->pm_cap == 0) {
6763 "Cannot find power management capability, aborting.\n");
6765 goto err_out_release;
6771 spin_lock_init(&bp->phy_lock);
6772 spin_lock_init(&bp->indirect_lock);
6773 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6775 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6776 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6777 dev->mem_end = dev->mem_start + mem_len;
6778 dev->irq = pdev->irq;
6780 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6783 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6785 goto err_out_release;
6788 /* Configure byte swap and enable write to the reg_window registers.
6789 * Rely on the CPU to do target byte swapping on big endian systems.
6790 * The chip's target access swapping will not swap all accesses.
6792 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6793 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6794 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6796 bnx2_set_power_state(bp, PCI_D0);
6798 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6800 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6801 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6803 "Cannot find PCIE capability, aborting.\n");
6807 bp->flags |= PCIE_FLAG;
6809 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6810 if (bp->pcix_cap == 0) {
6812 "Cannot find PCIX capability, aborting.\n");
6818 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6819 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6820 bp->flags |= MSI_CAP_FLAG;
6823 /* 5708 cannot support DMA addresses > 40-bit. */
6824 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6825 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6827 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6829 /* Configure DMA attributes. */
6830 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6831 dev->features |= NETIF_F_HIGHDMA;
6832 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6835 "pci_set_consistent_dma_mask failed, aborting.\n");
6838 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6839 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6843 if (!(bp->flags & PCIE_FLAG))
6844 bnx2_get_pci_speed(bp);
6846 /* 5706A0 may falsely detect SERR and PERR. */
6847 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6848 reg = REG_RD(bp, PCI_COMMAND);
6849 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6850 REG_WR(bp, PCI_COMMAND, reg);
6852 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6853 !(bp->flags & PCIX_FLAG)) {
6856 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6860 bnx2_init_nvram(bp);
6862 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6864 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6865 BNX2_SHM_HDR_SIGNATURE_SIG) {
6866 u32 off = PCI_FUNC(pdev->devfn) << 2;
6868 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6870 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6872 /* Get the permanent MAC address. First we need to make sure the
6873 * firmware is actually running.
6875 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6877 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6878 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6879 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6884 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6885 for (i = 0, j = 0; i < 3; i++) {
6888 num = (u8) (reg >> (24 - (i * 8)));
6889 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6890 if (num >= k || !skip0 || k == 1) {
6891 bp->fw_version[j++] = (num / k) + '0';
6896 bp->fw_version[j++] = '.';
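/* Example decode (illustrative): the loop above takes one version
 * component per byte from the top three bytes of BNX2_DEV_INFO_BC_REV
 * and prints each in decimal without leading zeros, so a register
 * value of 0x010C0200 would produce the bootcode version "1.12.2".
 */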
6898 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6899 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6902 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6903 bp->flags |= ASF_ENABLE_FLAG;
6905 for (i = 0; i < 30; i++) {
6906 reg = REG_RD_IND(bp, bp->shmem_base +
6907 BNX2_BC_STATE_CONDITION);
6908 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6913 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6914 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6915 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6916 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6918 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6920 bp->fw_version[j++] = ' ';
6921 for (i = 0; i < 3; i++) {
6922 reg = REG_RD_IND(bp, addr + i * 4);
6924 memcpy(&bp->fw_version[j], &reg, 4);
6929 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6930 bp->mac_addr[0] = (u8) (reg >> 8);
6931 bp->mac_addr[1] = (u8) reg;
6933 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6934 bp->mac_addr[2] = (u8) (reg >> 24);
6935 bp->mac_addr[3] = (u8) (reg >> 16);
6936 bp->mac_addr[4] = (u8) (reg >> 8);
6937 bp->mac_addr[5] = (u8) reg;
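/* Example: for MAC address 00:10:18:aa:bb:cc the shared memory holds
 * MAC_UPPER = 0x00000010 (bytes 0-1 in its low half) and
 * MAC_LOWER = 0x18aabbcc (bytes 2-5), which the shifts above unpack
 * most-significant byte first.
 */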
6939 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6941 bp->tx_ring_size = MAX_TX_DESC_CNT;
6942 bnx2_set_rx_ring_size(bp, 255);
6946 bp->tx_quick_cons_trip_int = 20;
6947 bp->tx_quick_cons_trip = 20;
6948 bp->tx_ticks_int = 80;
6951 bp->rx_quick_cons_trip_int = 6;
6952 bp->rx_quick_cons_trip = 6;
6953 bp->rx_ticks_int = 18;
6956 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6958 bp->timer_interval = HZ;
6959 bp->current_interval = HZ;
6963 /* Disable WOL support if we are running on a SERDES chip. */
6964 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6965 bnx2_get_5709_media(bp);
6966 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6967 bp->phy_flags |= PHY_SERDES_FLAG;
6969 bp->phy_port = PORT_TP;
6970 if (bp->phy_flags & PHY_SERDES_FLAG) {
6971 bp->phy_port = PORT_FIBRE;
6972 reg = REG_RD_IND(bp, bp->shmem_base +
6973 BNX2_SHARED_HW_CFG_CONFIG);
6974 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6975 bp->flags |= NO_WOL_FLAG;
6978 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6980 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6981 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6983 bnx2_init_remote_phy(bp);
6985 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6986 CHIP_NUM(bp) == CHIP_NUM_5708)
6987 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6988 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6989 (CHIP_REV(bp) == CHIP_REV_Ax ||
6990 CHIP_REV(bp) == CHIP_REV_Bx))
6991 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6993 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6994 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6995 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6996 bp->flags |= NO_WOL_FLAG;
7000 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7001 bp->tx_quick_cons_trip_int =
7002 bp->tx_quick_cons_trip;
7003 bp->tx_ticks_int = bp->tx_ticks;
7004 bp->rx_quick_cons_trip_int =
7005 bp->rx_quick_cons_trip;
7006 bp->rx_ticks_int = bp->rx_ticks;
7007 bp->comp_prod_trip_int = bp->comp_prod_trip;
7008 bp->com_ticks_int = bp->com_ticks;
7009 bp->cmd_ticks_int = bp->cmd_ticks;
7012 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7014 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7015 * with byte enables disabled on the unused 32-bit word. This is legal
7016 * but causes problems on the AMD 8132 which will eventually stop
7017 * responding after a while.
7019 * AMD believes this incompatibility is unique to the 5706, and
7020 * prefers to locally disable MSI rather than globally disabling it.
7022 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7023 struct pci_dev *amd_8132 = NULL;
7025 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7026 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7029 if (amd_8132->revision >= 0x10 &&
7030 amd_8132->revision <= 0x13) {
7032 pci_dev_put(amd_8132);
7038 bnx2_set_default_link(bp);
7039 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7041 init_timer(&bp->timer);
7042 bp->timer.expires = RUN_AT(bp->timer_interval);
7043 bp->timer.data = (unsigned long) bp;
7044 bp->timer.function = bnx2_timer;
7050 iounmap(bp->regview);
7055 pci_release_regions(pdev);
7058 pci_disable_device(pdev);
7059 pci_set_drvdata(pdev, NULL);
7065 static char * __devinit
7066 bnx2_bus_string(struct bnx2 *bp, char *str)
7070 if (bp->flags & PCIE_FLAG) {
7071 s += sprintf(s, "PCI Express");
7073 s += sprintf(s, "PCI");
7074 if (bp->flags & PCIX_FLAG)
7075 s += sprintf(s, "-X");
7076 if (bp->flags & PCI_32BIT_FLAG)
7077 s += sprintf(s, " 32-bit");
7079 s += sprintf(s, " 64-bit");
7080 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
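/* Typical results (illustrative): "PCI Express" for a 5709, or
 * "PCI-X 64-bit 133MHz" for a 5706 on a PCI-X bus; the bus width and
 * clock are only appended on the conventional PCI/PCI-X path.
 */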
7085 static int __devinit
7086 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7088 static int version_printed = 0;
7089 struct net_device *dev = NULL;
7093 DECLARE_MAC_BUF(mac);
7095 if (version_printed++ == 0)
7096 printk(KERN_INFO "%s", version);
7098 /* dev zeroed in init_etherdev */
7099 dev = alloc_etherdev(sizeof(*bp));
7104 rc = bnx2_init_board(pdev, dev);
7110 dev->open = bnx2_open;
7111 dev->hard_start_xmit = bnx2_start_xmit;
7112 dev->stop = bnx2_close;
7113 dev->get_stats = bnx2_get_stats;
7114 dev->set_multicast_list = bnx2_set_rx_mode;
7115 dev->do_ioctl = bnx2_ioctl;
7116 dev->set_mac_address = bnx2_change_mac_addr;
7117 dev->change_mtu = bnx2_change_mtu;
7118 dev->tx_timeout = bnx2_tx_timeout;
7119 dev->watchdog_timeo = TX_TIMEOUT;
7121 dev->vlan_rx_register = bnx2_vlan_rx_register;
7123 dev->ethtool_ops = &bnx2_ethtool_ops;
7125 bp = netdev_priv(dev);
7126 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
7128 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7129 dev->poll_controller = poll_bnx2;
7132 pci_set_drvdata(pdev, dev);
7134 memcpy(dev->dev_addr, bp->mac_addr, 6);
7135 memcpy(dev->perm_addr, bp->mac_addr, 6);
7136 bp->name = board_info[ent->driver_data].name;
7138 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7139 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7140 dev->features |= NETIF_F_IPV6_CSUM;
7143 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7145 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7146 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7147 dev->features |= NETIF_F_TSO6;
7149 if ((rc = register_netdev(dev))) {
7150 dev_err(&pdev->dev, "Cannot register net device\n");
7152 iounmap(bp->regview);
7153 pci_release_regions(pdev);
7154 pci_disable_device(pdev);
7155 pci_set_drvdata(pdev, NULL);
7160 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7161 "IRQ %d, node addr %s\n",
7164 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7165 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7166 bnx2_bus_string(bp, str),
7168 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7173 static void __devexit
7174 bnx2_remove_one(struct pci_dev *pdev)
7176 struct net_device *dev = pci_get_drvdata(pdev);
7177 struct bnx2 *bp = netdev_priv(dev);
7179 flush_scheduled_work();
7181 unregister_netdev(dev);
7184 iounmap(bp->regview);
7187 pci_release_regions(pdev);
7188 pci_disable_device(pdev);
7189 pci_set_drvdata(pdev, NULL);
7193 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7195 struct net_device *dev = pci_get_drvdata(pdev);
7196 struct bnx2 *bp = netdev_priv(dev);
7199 /* PCI register 4 needs to be saved whether netif_running() or not.
7200 * MSI address and data need to be saved if using MSI and
7203 pci_save_state(pdev);
7204 if (!netif_running(dev))
7207 flush_scheduled_work();
7208 bnx2_netif_stop(bp);
7209 netif_device_detach(dev);
7210 del_timer_sync(&bp->timer);
7211 if (bp->flags & NO_WOL_FLAG)
7212 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7214 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7216 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7217 bnx2_reset_chip(bp, reset_code);
7219 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7224 bnx2_resume(struct pci_dev *pdev)
7226 struct net_device *dev = pci_get_drvdata(pdev);
7227 struct bnx2 *bp = netdev_priv(dev);
7229 pci_restore_state(pdev);
7230 if (!netif_running(dev))
7233 bnx2_set_power_state(bp, PCI_D0);
7234 netif_device_attach(dev);
7236 bnx2_netif_start(bp);
7240 static struct pci_driver bnx2_pci_driver = {
7241 .name = DRV_MODULE_NAME,
7242 .id_table = bnx2_pci_tbl,
7243 .probe = bnx2_init_one,
7244 .remove = __devexit_p(bnx2_remove_one),
7245 .suspend = bnx2_suspend,
7246 .resume = bnx2_resume,
7249 static int __init bnx2_init(void)
7251 return pci_register_driver(&bnx2_pci_driver);
7254 static void __exit bnx2_cleanup(void)
7256 pci_unregister_driver(&bnx2_pci_driver);
7259 module_init(bnx2_init);
7260 module_exit(bnx2_cleanup);