/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.7.6"
#define DRV_MODULE_RELDATE	"May 16, 2008"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
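/* A worked example of the arithmetic above (illustrative, not from the
 * original source): with TX_DESC_CNT == 256 and MAX_TX_DESC_CNT == 255,
 * a 16-bit producer of 3 and consumer of 65284 (the counters have
 * wrapped) gives diff = (3 - 65284) & 0xffff = 255, i.e. exactly
 * MAX_TX_DESC_CNT entries in flight.  The skipped index is the last
 * slot of each descriptor page, which chains to the next page rather
 * than describing a packet.
 */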
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
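/* Both helpers above use the standard indirect-access pattern for
 * registers outside the directly mapped BAR window: write the target
 * offset to BNX2_PCICFG_REG_WINDOW_ADDRESS, then move the data through
 * BNX2_PCICFG_REG_WINDOW.  indirect_lock serializes the two-step
 * sequence so one caller's address write cannot interleave with
 * another caller's data access.
 */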
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
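/* On the 5709, context memory is written through a request/ack register
 * pair: the data goes to BNX2_CTX_CTX_DATA, the target offset plus a
 * WRITE_REQ bit goes to BNX2_CTX_CTX_CTRL, and the loop above polls
 * until the hardware clears WRITE_REQ to signal completion.  Older
 * chips expose context memory through a simple address/data pair.
 */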
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		udelay(40);
	}

	return ret;
}
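/* The two MDIO accessors above follow the same shape: temporarily
 * disable hardware auto-polling (which would otherwise contend for the
 * MDIO bus), start the transaction by setting START_BUSY in
 * BNX2_EMAC_MDIO_COMM, poll up to 50 times for the hardware to clear
 * START_BUSY, then restore auto-polling.  A transaction that never
 * clears START_BUSY is reported as -EBUSY.
 */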
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
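/* Note the two writes per vector above: the first acknowledges events
 * up to last_status_idx while keeping the interrupt masked, and the
 * second (without MASK_INT) re-enables the interrupt line.  The final
 * COAL_NOW command asks the host coalescing block to raise an
 * interrupt immediately if work is already pending.
 */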
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
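/* bnx2_netif_stop() and bnx2_netif_start() are paired through intr_sem:
 * stop increments the semaphore (via bnx2_disable_int_sync()) and start
 * only re-enables NAPI and interrupts when atomic_dec_and_test() brings
 * it back to zero, so nested stop/start sequences stay balanced.
 */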
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		if (rxr->rx_buf_ring)
			vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		if (rxr->rx_pg_ring)
			vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
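/* Each RX ring is built from rx_max_ring pages of hardware descriptors
 * (rx_desc_ring[], DMA-coherent) shadowed by one flat, vmalloc'ed array
 * of software buffer entries (rx_buf_ring).  The optional page ring
 * used for jumbo frames mirrors the same layout in rx_pg_desc_ring[]
 * and rx_pg_ring.
 */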
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->bnx2_napi[0].status_blk = bp->status_blk;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

			bnapi->status_blk_msix = (void *)
				((unsigned long) bp->status_blk +
				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
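/* The single allocation made above is laid out as one (or, with MSI-X,
 * BNX2_MAX_MSIX_HW_VEC) aligned status block(s) followed by the
 * statistics block.  stats_blk and stats_blk_mapping are therefore just
 * offsets into the status block allocation, which is why freeing
 * status_blk in bnx2_free_mem() releases both.
 */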
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
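/* The 802.3 Table 28B-3 resolution above collapses to this truth table
 * (local advertisement / remote advertisement -> resolved flow control):
 *
 *   CAP          and  CAP                 -> TX | RX (symmetric pause)
 *   CAP|ASYM     and  ASYM without CAP    -> RX only
 *   ASYM only    and  CAP|ASYM            -> TX only
 *   anything else                         -> no pause in either direction
 */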
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	if (!(bmcr & BMCR_ANENABLE))
		return 0;

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
		if (common & ADVERTISE_1000XFULL)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
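		/* MII_CTRL1000 holds the local 1000BASE-T advertisement and
		 * MII_STAT1000 the partner's ability bits; the partner bits
		 * sit two positions higher (LPA_1000FULL = 0x0800 vs
		 * ADVERTISE_1000FULL = 0x0200), so shifting the status right
		 * by 2 lines the two words up for the AND below.
		 */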
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100)
			bp->line_speed = SPEED_100;
		else
			bp->line_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;
	}

	bp->link_up = 1;
	return 0;
}
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
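/* The watermarks above are computed in buffer-descriptor units and then
 * divided by the LO/HI_WATER_MARK_SCALE constants to fit the small
 * fields in the context word: lo_water occupies the low bits and
 * hi_water is shifted up by BNX2_L2CTX_HI_WATER_MARK_SHIFT (hi_water is
 * clamped to 0xf).  A hi_water of 0 disables pause generation for the
 * ring, which is why lo_water is zeroed alongside it.
 */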
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		bnx2_init_rx_context(bp, cid);
	}
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else
		val |= BNX2_EMAC_MODE_PORT_GMII;

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
			adv = ADVERTISE_1000XPAUSE;
		else
			adv = ADVERTISE_PAUSE_CAP;
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
			adv = ADVERTISE_1000XPSE_ASYM;
		else
			adv = ADVERTISE_PAUSE_ASYM;
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES)
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		else
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}
	return adv;
}
static int bnx2_fw_sync(struct bnx2 *, u32, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static void
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);
		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
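/* Driver/firmware handshake: each request carries a sequence number in
 * the low bits of the BNX2_DRV_MB mailbox, and the firmware echoes it
 * in the ACK field of BNX2_FW_MB once it has consumed the message.  On
 * timeout the driver rewrites the mailbox with
 * BNX2_DRV_MSG_CODE_FW_TIMEOUT so the firmware can see that the driver
 * gave up waiting.
 */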
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8)
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			else
				new_vcid = vcid;
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
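/* The free command above packs the same buffer number into both halves
 * of the word ((val << 9) | val) and sets bit 0 as a valid bit, which
 * appears to be the layout the RBUF free register expects.  The net
 * effect of the whole routine is to allocate every internal RX buffer,
 * then return only the good ones, leaving the bad memory blocks
 * permanently checked out.
 */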
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
static int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2469 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2471 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2472 struct page *page = rx_pg->page;
2477 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2478 PCI_DMA_FROMDEVICE);
2485 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2487 struct sk_buff *skb;
2488 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2490 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2491 unsigned long align;
2493 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
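2494 if (skb == NULL) {
2495 return -ENOMEM;
2496 }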
2498 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2499 skb_reserve(skb, BNX2_RX_ALIGN - align);
2501 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2502 PCI_DMA_FROMDEVICE);
2505 pci_unmap_addr_set(rx_buf, mapping, mapping);
2507 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2508 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2510 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2516 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2518 struct status_block *sblk = bnapi->status_blk;
2519 u32 new_link_state, old_link_state;
2522 new_link_state = sblk->status_attn_bits & event;
2523 old_link_state = sblk->status_attn_bits_ack & event;
2524 if (new_link_state != old_link_state) {
2526 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2528 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2536 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2538 spin_lock(&bp->phy_lock);
2540 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2542 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2543 bnx2_set_remote_link(bp);
2545 spin_unlock(&bp->phy_lock);
2550 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2554 if (bnapi->int_num == 0)
2555 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2557 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2559 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
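2560 cons++; /* skip the chain-BD slot at the end of each ring page */
2561 return cons;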
2565 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2567 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2568 u16 hw_cons, sw_cons, sw_ring_cons;
2571 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2572 sw_cons = txr->tx_cons;
2574 while (sw_cons != hw_cons) {
2575 struct sw_bd *tx_buf;
2576 struct sk_buff *skb;
2579 sw_ring_cons = TX_RING_IDX(sw_cons);
2581 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2584 /* partial BD completions possible with TSO packets */
2585 if (skb_is_gso(skb)) {
2586 u16 last_idx, last_ring_idx;
2588 last_idx = sw_cons +
2589 skb_shinfo(skb)->nr_frags + 1;
2590 last_ring_idx = sw_ring_cons +
2591 skb_shinfo(skb)->nr_frags + 1;
2592 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
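2593 last_idx++; /* the consumer index will also skip the chain-BD slot */
2594 }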
2595 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2600 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2601 skb_headlen(skb), PCI_DMA_TODEVICE);
2604 last = skb_shinfo(skb)->nr_frags;
2606 for (i = 0; i < last; i++) {
2607 sw_cons = NEXT_TX_BD(sw_cons);
2609 pci_unmap_page(bp->pdev,
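2610 pci_unmap_addr(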
2611 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
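2612 mapping),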
2613 skb_shinfo(skb)->frags[i].size,
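2614 PCI_DMA_TODEVICE);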
2617 sw_cons = NEXT_TX_BD(sw_cons);
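2619 dev_kfree_skb(skb);
2620 tx_pkt++;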
2621 if (tx_pkt == budget)
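2622 break;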
2624 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2627 txr->hw_tx_cons = hw_cons;
2628 txr->tx_cons = sw_cons;
2629 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2630 * before checking for netif_queue_stopped(). Without the
2631 * memory barrier, there is a small possibility that bnx2_start_xmit()
2632 * will miss it and cause the queue to be stopped forever.
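2633 */
2634 smp_mb();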
2636 if (unlikely(netif_queue_stopped(bp->dev)) &&
2637 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2638 netif_tx_lock(bp->dev);
2639 if ((netif_queue_stopped(bp->dev)) &&
2640 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2641 netif_wake_queue(bp->dev);
2642 netif_tx_unlock(bp->dev);
2648 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2649 struct sk_buff *skb, int count)
2651 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2652 struct rx_bd *cons_bd, *prod_bd;
2655 u16 hw_prod = rxr->rx_pg_prod, prod;
2656 u16 cons = rxr->rx_pg_cons;
2658 for (i = 0; i < count; i++) {
2659 prod = RX_PG_RING_IDX(hw_prod);
2661 prod_rx_pg = &rxr->rx_pg_ring[prod];
2662 cons_rx_pg = &rxr->rx_pg_ring[cons];
2663 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2664 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2666 if (i == 0 && skb) {
2668 struct skb_shared_info *shinfo;
2670 shinfo = skb_shinfo(skb);
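2671 shinfo->nr_frags--; /* take back the last page attached to this skb */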
2672 page = shinfo->frags[shinfo->nr_frags].page;
2673 shinfo->frags[shinfo->nr_frags].page = NULL;
2674 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2675 PCI_DMA_FROMDEVICE);
2676 cons_rx_pg->page = page;
2677 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2681 prod_rx_pg->page = cons_rx_pg->page;
2682 cons_rx_pg->page = NULL;
2683 pci_unmap_addr_set(prod_rx_pg, mapping,
2684 pci_unmap_addr(cons_rx_pg, mapping));
2686 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2687 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2690 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2691 hw_prod = NEXT_RX_BD(hw_prod);
2693 rxr->rx_pg_prod = hw_prod;
2694 rxr->rx_pg_cons = cons;
2698 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2699 struct sk_buff *skb, u16 cons, u16 prod)
2701 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2702 struct rx_bd *cons_bd, *prod_bd;
2704 cons_rx_buf = &rxr->rx_buf_ring[cons];
2705 prod_rx_buf = &rxr->rx_buf_ring[prod];
2707 pci_dma_sync_single_for_device(bp->pdev,
2708 pci_unmap_addr(cons_rx_buf, mapping),
2709 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2711 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2713 prod_rx_buf->skb = skb;
2718 pci_unmap_addr_set(prod_rx_buf, mapping,
2719 pci_unmap_addr(cons_rx_buf, mapping));
2721 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2722 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2723 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2724 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2728 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2729 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
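2730 u32 ring_idx)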
2733 u16 prod = ring_idx & 0xffff;
2735 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2736 if (unlikely(err)) {
2737 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
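2738 if (hdr_len) { /* only split frames consumed page-ring buffers */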
2739 unsigned int raw_len = len + 4;
2740 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2742 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2747 skb_reserve(skb, BNX2_RX_OFFSET);
2748 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2749 PCI_DMA_FROMDEVICE);
2755 unsigned int i, frag_len, frag_size, pages;
2756 struct sw_pg *rx_pg;
2757 u16 pg_cons = rxr->rx_pg_cons;
2758 u16 pg_prod = rxr->rx_pg_prod;
2760 frag_size = len + 4 - hdr_len;
2761 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2762 skb_put(skb, hdr_len);
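2763 /* frag_size includes the 4-byte FCS; if the final page would hold only (part of) the FCS, trim it instead of attaching it */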
2764 for (i = 0; i < pages; i++) {
2765 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2766 if (unlikely(frag_len <= 4)) {
2767 unsigned int tail = 4 - frag_len;
2769 rxr->rx_pg_cons = pg_cons;
2770 rxr->rx_pg_prod = pg_prod;
2771 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2778 &skb_shinfo(skb)->frags[i - 1];
2780 skb->data_len -= tail;
2781 skb->truesize -= tail;
2785 rx_pg = &rxr->rx_pg_ring[pg_cons];
2787 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2788 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2793 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2796 err = bnx2_alloc_rx_page(bp, rxr,
2797 RX_PG_RING_IDX(pg_prod));
2798 if (unlikely(err)) {
2799 rxr->rx_pg_cons = pg_cons;
2800 rxr->rx_pg_prod = pg_prod;
2801 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2806 frag_size -= frag_len;
2807 skb->data_len += frag_len;
2808 skb->truesize += frag_len;
2809 skb->len += frag_len;
2811 pg_prod = NEXT_RX_BD(pg_prod);
2812 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2814 rxr->rx_pg_prod = pg_prod;
2815 rxr->rx_pg_cons = pg_cons;
2821 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2825 if (bnapi->int_num == 0)
2826 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2828 cons = bnapi->status_blk_msix->status_rx_quick_consumer_index;
2830 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
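2831 cons++; /* skip the chain-BD slot at the end of each ring page */
2832 return cons;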
2836 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2838 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2839 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2840 struct l2_fhdr *rx_hdr;
2841 int rx_pkt = 0, pg_ring_used = 0;
2843 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2844 sw_cons = rxr->rx_cons;
2845 sw_prod = rxr->rx_prod;
2847 /* Memory barrier necessary as speculative reads of the rx
2848 * buffer can be ahead of the index in the status block
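2849 */
2850 rmb();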
2851 while (sw_cons != hw_cons) {
2852 unsigned int len, hdr_len;
2854 struct sw_bd *rx_buf;
2855 struct sk_buff *skb;
2856 dma_addr_t dma_addr;
2858 sw_ring_cons = RX_RING_IDX(sw_cons);
2859 sw_ring_prod = RX_RING_IDX(sw_prod);
2861 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2866 dma_addr = pci_unmap_addr(rx_buf, mapping);
2868 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2869 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2870 PCI_DMA_FROMDEVICE);
2872 rx_hdr = (struct l2_fhdr *) skb->data;
2873 len = rx_hdr->l2_fhdr_pkt_len;
2875 if ((status = rx_hdr->l2_fhdr_status) &
2876 (L2_FHDR_ERRORS_BAD_CRC |
2877 L2_FHDR_ERRORS_PHY_DECODE |
2878 L2_FHDR_ERRORS_ALIGNMENT |
2879 L2_FHDR_ERRORS_TOO_SHORT |
2880 L2_FHDR_ERRORS_GIANT_FRAME)) {
2882 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
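2883 sw_ring_prod);
2884 goto next_rx;
2886 /* For split frames the controller reports the header length in the l2_fhdr ip_xsum field. */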
2887 if (status & L2_FHDR_STATUS_SPLIT) {
2888 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2890 } else if (len > bp->rx_jumbo_thresh) {
2891 hdr_len = bp->rx_jumbo_thresh;
2897 if (len <= bp->rx_copy_thresh) {
2898 struct sk_buff *new_skb;
2900 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2901 if (new_skb == NULL) {
2902 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2908 skb_copy_from_linear_data_offset(skb,
2910 new_skb->data, len + 2);
2911 skb_reserve(new_skb, 2);
2912 skb_put(new_skb, len);
2914 bnx2_reuse_rx_skb(bp, rxr, skb,
2915 sw_ring_cons, sw_ring_prod);
2918 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2919 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2922 skb->protocol = eth_type_trans(skb, bp->dev);
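2923 /* Drop frames longer than the MTU unless they carry an 802.1Q VLAN tag (ethertype 0x8100), which adds 4 bytes. */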
2924 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2925 (ntohs(skb->protocol) != 0x8100)) {
2932 skb->ip_summed = CHECKSUM_NONE;
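2933 if (bp->rx_csum &&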
2934 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2935 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2937 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2938 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2939 skb->ip_summed = CHECKSUM_UNNECESSARY;
2943 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2944 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2945 rx_hdr->l2_fhdr_vlan_tag);
2949 netif_receive_skb(skb);
2951 bp->dev->last_rx = jiffies;
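2952 rx_pkt++;
2954 next_rx: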
2955 sw_cons = NEXT_RX_BD(sw_cons);
2956 sw_prod = NEXT_RX_BD(sw_prod);
2958 if (rx_pkt == budget)
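2959 break;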
2961 /* Refresh hw_cons to see if there is new work */
2962 if (sw_cons == hw_cons) {
2963 hw_cons = bnx2_get_hw_rx_cons(bnapi);
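2964 rmb();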
2967 rxr->rx_cons = sw_cons;
2968 rxr->rx_prod = sw_prod;
2971 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
2973 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
2975 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
2983 /* MSI ISR - The only difference between this and the INTx ISR
2984 * is that the MSI interrupt is always serviced.
2987 bnx2_msi(int irq, void *dev_instance)
2989 struct net_device *dev = dev_instance;
2990 struct bnx2 *bp = netdev_priv(dev);
2991 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2993 prefetch(bnapi->status_blk);
2994 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2995 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2996 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2998 /* Return here if interrupt is disabled. */
2999 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3002 netif_rx_schedule(dev, &bnapi->napi);
3008 bnx2_msi_1shot(int irq, void *dev_instance)
3010 struct net_device *dev = dev_instance;
3011 struct bnx2 *bp = netdev_priv(dev);
3012 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3014 prefetch(bnapi->status_blk);
3016 /* Return here if interrupt is disabled. */
3017 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3020 netif_rx_schedule(dev, &bnapi->napi);
3026 bnx2_interrupt(int irq, void *dev_instance)
3028 struct net_device *dev = dev_instance;
3029 struct bnx2 *bp = netdev_priv(dev);
3030 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3031 struct status_block *sblk = bnapi->status_blk;
3033 /* When using INTx, it is possible for the interrupt to arrive
3034 * at the CPU before the status block posted prior to the
3035 * interrupt. Reading a register will flush the status block.
3036 * When using MSI, the MSI message will always complete after
3037 * the status block write.
3039 if ((sblk->status_idx == bnapi->last_status_idx) &&
3040 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3041 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3044 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3045 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3046 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3048 /* Read back to deassert IRQ immediately to avoid too many
3049 * spurious interrupts.
3051 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3053 /* Return here if interrupt is shared and is disabled. */
3054 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3057 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
3058 bnapi->last_status_idx = sblk->status_idx;
3059 __netif_rx_schedule(dev, &bnapi->napi);
3066 bnx2_tx_msix(int irq, void *dev_instance)
3068 struct net_device *dev = dev_instance;
3069 struct bnx2 *bp = netdev_priv(dev);
3070 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
3072 prefetch(bnapi->status_blk_msix);
3074 /* Return here if interrupt is disabled. */
3075 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3078 netif_rx_schedule(dev, &bnapi->napi);
3082 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3083 STATUS_ATTN_BITS_TIMER_ABORT)
3086 bnx2_has_work(struct bnx2_napi *bnapi)
3088 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3089 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3090 struct status_block *sblk = bnapi->status_blk;
3092 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3093 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3096 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3097 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3103 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
3105 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3106 struct bnx2 *bp = bnapi->bp;
3107 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3109 struct status_block_msix *sblk = bnapi->status_blk_msix;
3112 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
3113 if (unlikely(work_done >= budget))
3116 bnapi->last_status_idx = sblk->status_idx;
3118 } while (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons);
3120 netif_rx_complete(bp->dev, napi);
3121 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3122 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3123 bnapi->last_status_idx);
3127 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3128 int work_done, int budget)
3130 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3131 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3132 struct status_block *sblk = bnapi->status_blk;
3133 u32 status_attn_bits = sblk->status_attn_bits;
3134 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3136 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3137 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3139 bnx2_phy_int(bp, bnapi);
3141 /* This is needed to take care of transient status
3142 * during link changes.
3144 REG_WR(bp, BNX2_HC_COMMAND,
3145 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3146 REG_RD(bp, BNX2_HC_COMMAND);
3149 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3150 bnx2_tx_int(bp, bnapi, 0);
3152 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3153 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3158 static int bnx2_poll(struct napi_struct *napi, int budget)
3160 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3161 struct bnx2 *bp = bnapi->bp;
3163 struct status_block *sblk = bnapi->status_blk;
3166 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3168 if (unlikely(work_done >= budget))
3171 /* bnapi->last_status_idx is used below to tell the hw how
3172 * much work has been processed, so we must read it before
3173 * checking for more work.
3175 bnapi->last_status_idx = sblk->status_idx;
3177 if (likely(!bnx2_has_work(bnapi))) {
3178 netif_rx_complete(bp->dev, napi);
3179 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3180 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3181 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3182 bnapi->last_status_idx);
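3183 break;
3184 } /* without MSI/MSI-X: ack twice below, first with interrupts still masked, then unmasked */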
3185 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3186 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3187 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3188 bnapi->last_status_idx);
3190 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3191 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3192 bnapi->last_status_idx);
3200 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3201 * from set_multicast.
3204 bnx2_set_rx_mode(struct net_device *dev)
3206 struct bnx2 *bp = netdev_priv(dev);
3207 u32 rx_mode, sort_mode;
3210 spin_lock_bh(&bp->phy_lock);
3212 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3213 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3214 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
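3215 #ifdef BCM_VLAN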
3216 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3217 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
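3218 #else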
3219 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3220 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
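3221 #endif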
3222 if (dev->flags & IFF_PROMISC) {
3223 /* Promiscuous mode. */
3224 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3225 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3226 BNX2_RPM_SORT_USER0_PROM_VLAN;
3228 else if (dev->flags & IFF_ALLMULTI) {
3229 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3230 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3233 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3236 /* Accept one or more multicast(s). */
3237 struct dev_mc_list *mclist;
3238 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3243 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3245 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3246 i++, mclist = mclist->next) {
3248 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3249 bit = crc & 0xff;
3250 regidx = (bit & 0xe0) >> 5;
3251 bit &= 0x1f;
3252 mc_filter[regidx] |= (1 << bit);
3255 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3256 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3260 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3263 if (rx_mode != bp->rx_mode) {
3264 bp->rx_mode = rx_mode;
3265 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3268 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3269 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3270 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3272 spin_unlock_bh(&bp->phy_lock);
3276 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
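3277 u32 rv2p_proc)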
3282 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3283 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3284 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3285 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3286 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3289 for (i = 0; i < rv2p_code_len; i += 8) {
3290 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
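3291 rv2p_code++;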
3292 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
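3293 rv2p_code++;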
3295 if (rv2p_proc == RV2P_PROC1) {
3296 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3297 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3300 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3301 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3305 /* Reset the processor, un-stall is done later. */
3306 if (rv2p_proc == RV2P_PROC1) {
3307 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3310 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3315 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3322 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3323 val |= cpu_reg->mode_value_halt;
3324 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3325 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3327 /* Load the Text area. */
3328 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3332 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3337 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3338 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3342 /* Load the Data area. */
3343 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3347 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3348 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3352 /* Load the SBSS area. */
3353 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3357 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3358 bnx2_reg_wr_ind(bp, offset, 0);
3362 /* Load the BSS area. */
3363 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3367 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3368 bnx2_reg_wr_ind(bp, offset, 0);
3372 /* Load the Read-Only area. */
3373 offset = cpu_reg->spad_base +
3374 (fw->rodata_addr - cpu_reg->mips_view_base);
3378 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3379 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3383 /* Clear the pre-fetch instruction. */
3384 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3385 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3387 /* Start the CPU. */
3388 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3389 val &= ~cpu_reg->mode_value_halt;
3390 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3391 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3397 bnx2_init_cpus(struct bnx2 *bp)
3403 /* Initialize the RV2P processor. */
3404 text = vmalloc(FW_BUF_SIZE);
3407 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3408 rv2p = bnx2_xi_rv2p_proc1;
3409 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3411 rv2p = bnx2_rv2p_proc1;
3412 rv2p_len = sizeof(bnx2_rv2p_proc1);
3414 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3418 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3420 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3421 rv2p = bnx2_xi_rv2p_proc2;
3422 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3424 rv2p = bnx2_rv2p_proc2;
3425 rv2p_len = sizeof(bnx2_rv2p_proc2);
3427 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3431 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3433 /* Initialize the RX Processor. */
3434 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3435 fw = &bnx2_rxp_fw_09;
3437 fw = &bnx2_rxp_fw_06;
3440 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3444 /* Initialize the TX Processor. */
3445 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3446 fw = &bnx2_txp_fw_09;
3448 fw = &bnx2_txp_fw_06;
3451 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3455 /* Initialize the TX Patch-up Processor. */
3456 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3457 fw = &bnx2_tpat_fw_09;
3459 fw = &bnx2_tpat_fw_06;
3462 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3466 /* Initialize the Completion Processor. */
3467 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3468 fw = &bnx2_com_fw_09;
3470 fw = &bnx2_com_fw_06;
3473 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3477 /* Initialize the Command Processor. */
3478 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3479 fw = &bnx2_cp_fw_09;
3481 fw = &bnx2_cp_fw_06;
3484 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3492 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3496 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3502 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3503 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3504 PCI_PM_CTRL_PME_STATUS);
3506 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3507 /* delay required during transition out of D3hot */
3510 val = REG_RD(bp, BNX2_EMAC_MODE);
3511 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3512 val &= ~BNX2_EMAC_MODE_MPKT;
3513 REG_WR(bp, BNX2_EMAC_MODE, val);
3515 val = REG_RD(bp, BNX2_RPM_CONFIG);
3516 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3517 REG_WR(bp, BNX2_RPM_CONFIG, val);
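3518 break;
3520 case PCI_D3hot: {
3524 if (bp->wol) { /* arm WoL: force 10/100 autoneg on copper and enable magic-packet receive below */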
3528 autoneg = bp->autoneg;
3529 advertising = bp->advertising;
3531 if (bp->phy_port == PORT_TP) {
3532 bp->autoneg = AUTONEG_SPEED;
3533 bp->advertising = ADVERTISED_10baseT_Half |
3534 ADVERTISED_10baseT_Full |
3535 ADVERTISED_100baseT_Half |
3536 ADVERTISED_100baseT_Full |
3537 ADVERTISED_Autoneg;
3540 spin_lock_bh(&bp->phy_lock);
3541 bnx2_setup_phy(bp, bp->phy_port);
3542 spin_unlock_bh(&bp->phy_lock);
3544 bp->autoneg = autoneg;
3545 bp->advertising = advertising;
3547 bnx2_set_mac_addr(bp);
3549 val = REG_RD(bp, BNX2_EMAC_MODE);
3551 /* Enable port mode. */
3552 val &= ~BNX2_EMAC_MODE_PORT;
3553 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3554 BNX2_EMAC_MODE_ACPI_RCVD |
3555 BNX2_EMAC_MODE_MPKT;
3556 if (bp->phy_port == PORT_TP)
3557 val |= BNX2_EMAC_MODE_PORT_MII;
3559 val |= BNX2_EMAC_MODE_PORT_GMII;
3560 if (bp->line_speed == SPEED_2500)
3561 val |= BNX2_EMAC_MODE_25G_MODE;
3564 REG_WR(bp, BNX2_EMAC_MODE, val);
3566 /* receive all multicast */
3567 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3568 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3571 REG_WR(bp, BNX2_EMAC_RX_MODE,
3572 BNX2_EMAC_RX_MODE_SORT_MODE);
3574 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3575 BNX2_RPM_SORT_USER0_MC_EN;
3576 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3577 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3578 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3579 BNX2_RPM_SORT_USER0_ENA);
3581 /* Need to enable EMAC and RPM for WOL. */
3582 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3583 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3584 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3585 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3587 val = REG_RD(bp, BNX2_RPM_CONFIG);
3588 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3589 REG_WR(bp, BNX2_RPM_CONFIG, val);
3591 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3594 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3597 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3598 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3600 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3601 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3602 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3611 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3613 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3616 /* No more memory access after this point until
3617 * device is brought back to D0.
3629 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3634 /* Request access to the flash interface. */
3635 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3636 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3637 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3638 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3644 if (j >= NVRAM_TIMEOUT_COUNT)
3651 bnx2_release_nvram_lock(struct bnx2 *bp)
3656 /* Relinquish nvram interface. */
3657 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3659 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3660 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3661 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3667 if (j >= NVRAM_TIMEOUT_COUNT)
3675 bnx2_enable_nvram_write(struct bnx2 *bp)
3679 val = REG_RD(bp, BNX2_MISC_CFG);
3680 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3682 if (bp->flash_info->flags & BNX2_NV_WREN) {
3685 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3686 REG_WR(bp, BNX2_NVM_COMMAND,
3687 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3689 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3692 val = REG_RD(bp, BNX2_NVM_COMMAND);
3693 if (val & BNX2_NVM_COMMAND_DONE)
3697 if (j >= NVRAM_TIMEOUT_COUNT)
3704 bnx2_disable_nvram_write(struct bnx2 *bp)
3708 val = REG_RD(bp, BNX2_MISC_CFG);
3709 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3714 bnx2_enable_nvram_access(struct bnx2 *bp)
3718 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3719 /* Enable both bits, even on read. */
3720 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3721 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3725 bnx2_disable_nvram_access(struct bnx2 *bp)
3729 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3730 /* Disable both bits, even after read. */
3731 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3732 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3733 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3737 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3742 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3743 /* Buffered flash, no erase needed */
3746 /* Build an erase command */
3747 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3748 BNX2_NVM_COMMAND_DOIT;
3750 /* Need to clear DONE bit separately. */
3751 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3753 /* Address of the NVRAM page to erase. */
3754 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3756 /* Issue an erase command. */
3757 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3759 /* Wait for completion. */
3760 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3765 val = REG_RD(bp, BNX2_NVM_COMMAND);
3766 if (val & BNX2_NVM_COMMAND_DONE)
3770 if (j >= NVRAM_TIMEOUT_COUNT)
3777 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3782 /* Build the command word. */
3783 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3785 /* Translate the offset into the buffered flash's page-addressed form; not needed for the 5709. */
3786 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3787 offset = ((offset / bp->flash_info->page_size) <<
3788 bp->flash_info->page_bits) +
3789 (offset % bp->flash_info->page_size);
3792 /* Need to clear DONE bit separately. */
3793 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3795 /* Address of the NVRAM to read from. */
3796 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3798 /* Issue a read command. */
3799 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3801 /* Wait for completion. */
3802 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3807 val = REG_RD(bp, BNX2_NVM_COMMAND);
3808 if (val & BNX2_NVM_COMMAND_DONE) {
3809 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3810 memcpy(ret_val, &v, 4);
3814 if (j >= NVRAM_TIMEOUT_COUNT)
3822 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3828 /* Build the command word. */
3829 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3831 /* Translate the offset into the buffered flash's page-addressed form; not needed for the 5709. */
3832 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3833 offset = ((offset / bp->flash_info->page_size) <<
3834 bp->flash_info->page_bits) +
3835 (offset % bp->flash_info->page_size);
3838 /* Need to clear DONE bit separately. */
3839 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3841 memcpy(&val32, val, 4);
3843 /* Write the data. */
3844 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3846 /* Address of the NVRAM to write to. */
3847 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3849 /* Issue the write command. */
3850 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3852 /* Wait for completion. */
3853 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3856 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3859 if (j >= NVRAM_TIMEOUT_COUNT)
3866 bnx2_init_nvram(struct bnx2 *bp)
3869 int j, entry_count, rc = 0;
3870 struct flash_spec *flash;
3872 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3873 bp->flash_info = &flash_5709;
3874 goto get_flash_size;
3877 /* Determine the selected interface. */
3878 val = REG_RD(bp, BNX2_NVM_CFG1);
3880 entry_count = ARRAY_SIZE(flash_table);
3882 if (val & 0x40000000) {
3884 /* Flash interface has been reconfigured */
3885 for (j = 0, flash = &flash_table[0]; j < entry_count;
3887 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3888 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3889 bp->flash_info = flash;
3896 /* Flash interface has not yet been reconfigured */
3898 if (val & (1 << 23))
3899 mask = FLASH_BACKUP_STRAP_MASK;
3901 mask = FLASH_STRAP_MASK;
3903 for (j = 0, flash = &flash_table[0]; j < entry_count;
3906 if ((val & mask) == (flash->strapping & mask)) {
3907 bp->flash_info = flash;
3909 /* Request access to the flash interface. */
3910 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3913 /* Enable access to flash interface */
3914 bnx2_enable_nvram_access(bp);
3916 /* Reconfigure the flash interface */
3917 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3918 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3919 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3920 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3922 /* Disable access to flash interface */
3923 bnx2_disable_nvram_access(bp);
3924 bnx2_release_nvram_lock(bp);
3929 } /* if (val & 0x40000000) */
3931 if (j == entry_count) {
3932 bp->flash_info = NULL;
3933 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3938 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3939 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3941 bp->flash_size = val;
3943 bp->flash_size = bp->flash_info->total_size;
3949 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
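3950 int buf_size)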
3953 u32 cmd_flags, offset32, len32, extra;
3958 /* Request access to the flash interface. */
3959 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3962 /* Enable access to flash interface */
3963 bnx2_enable_nvram_access(bp);
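3964 /* Reads are dword-wide; round an unaligned start down to a dword boundary and copy out only the requested bytes. */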
3976 pre_len = 4 - (offset & 3);
3978 if (pre_len >= len32) {
3980 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3981 BNX2_NVM_COMMAND_LAST;
3984 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3987 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3992 memcpy(ret_buf, buf + (offset & 3), pre_len);
3999 extra = 4 - (len32 & 3);
4000 len32 = (len32 + 4) & ~3;
4007 cmd_flags = BNX2_NVM_COMMAND_LAST;
4009 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4010 BNX2_NVM_COMMAND_LAST;
4012 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4014 memcpy(ret_buf, buf, 4 - extra);
4016 else if (len32 > 0) {
4019 /* Read the first word. */
4023 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4025 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4027 /* Advance to the next dword. */
4032 while (len32 > 4 && rc == 0) {
4033 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4035 /* Advance to the next dword. */
4044 cmd_flags = BNX2_NVM_COMMAND_LAST;
4045 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4047 memcpy(ret_buf, buf, 4 - extra);
4050 /* Disable access to flash interface */
4051 bnx2_disable_nvram_access(bp);
4053 bnx2_release_nvram_lock(bp);
4059 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
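4060 int buf_size)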
4062 u32 written, offset32, len32;
4063 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4065 int align_start, align_end;
4070 align_start = align_end = 0;
4072 if ((align_start = (offset32 & 3))) {
4074 len32 += align_start;
4077 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4082 align_end = 4 - (len32 & 3);
4084 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4088 if (align_start || align_end) {
4089 align_buf = kmalloc(len32, GFP_KERNEL);
4090 if (align_buf == NULL)
4093 memcpy(align_buf, start, 4);
4096 memcpy(align_buf + len32 - 4, end, 4);
4098 memcpy(align_buf + align_start, data_buf, buf_size);
4102 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4103 flash_buffer = kmalloc(264, GFP_KERNEL);
4104 if (flash_buffer == NULL) {
4106 goto nvram_write_end;
4111 while ((written < len32) && (rc == 0)) {
4112 u32 page_start, page_end, data_start, data_end;
4113 u32 addr, cmd_flags;
4116 /* Find the page_start addr */
4117 page_start = offset32 + written;
4118 page_start -= (page_start % bp->flash_info->page_size);
4119 /* Find the page_end addr */
4120 page_end = page_start + bp->flash_info->page_size;
4121 /* Find the data_start addr */
4122 data_start = (written == 0) ? offset32 : page_start;
4123 /* Find the data_end addr */
4124 data_end = (page_end > offset32 + len32) ?
4125 (offset32 + len32) : page_end;
4127 /* Request access to the flash interface. */
4128 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4129 goto nvram_write_end;
4131 /* Enable access to flash interface */
4132 bnx2_enable_nvram_access(bp);
4134 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4135 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4138 /* Read the whole page into the buffer
4139 * (non-buffered flash only) */
4140 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4141 if (j == (bp->flash_info->page_size - 4)) {
4142 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4144 rc = bnx2_nvram_read_dword(bp,
4150 goto nvram_write_end;
4156 /* Enable writes to flash interface (unlock write-protect) */
4157 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4158 goto nvram_write_end;
4160 /* Loop to write back the buffer data from page_start to
4163 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4164 /* Erase the page */
4165 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4166 goto nvram_write_end;
4168 /* Re-enable the write again for the actual write */
4169 bnx2_enable_nvram_write(bp);
4171 for (addr = page_start; addr < data_start;
4172 addr += 4, i += 4) {
4174 rc = bnx2_nvram_write_dword(bp, addr,
4175 &flash_buffer[i], cmd_flags);
4178 goto nvram_write_end;
4184 /* Loop to write the new data from data_start to data_end */
4185 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4186 if ((addr == page_end - 4) ||
4187 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4188 (addr == data_end - 4))) {
4190 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4192 rc = bnx2_nvram_write_dword(bp, addr, buf,
4196 goto nvram_write_end;
4202 /* Loop to write back the buffer data from data_end
4204 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4205 for (addr = data_end; addr < page_end;
4206 addr += 4, i += 4) {
4208 if (addr == page_end - 4) {
4209 cmd_flags = BNX2_NVM_COMMAND_LAST;
4211 rc = bnx2_nvram_write_dword(bp, addr,
4212 &flash_buffer[i], cmd_flags);
4215 goto nvram_write_end;
4221 /* Disable writes to flash interface (lock write-protect) */
4222 bnx2_disable_nvram_write(bp);
4224 /* Disable access to flash interface */
4225 bnx2_disable_nvram_access(bp);
4226 bnx2_release_nvram_lock(bp);
4228 /* Increment written */
4229 written += data_end - data_start;
4233 kfree(flash_buffer);
4239 bnx2_init_remote_phy(struct bnx2 *bp)
4243 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4244 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4247 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4248 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4251 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4252 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4254 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4255 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4256 bp->phy_port = PORT_FIBRE;
4258 bp->phy_port = PORT_TP;
4260 if (netif_running(bp->dev)) {
4263 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4264 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4265 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4271 bnx2_setup_msix_tbl(struct bnx2 *bp)
4273 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4275 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4276 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4280 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4286 /* Wait for the current PCI transaction to complete before
4287 * issuing a reset. */
4288 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4289 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4290 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4291 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4292 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4293 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4296 /* Wait for the firmware to tell us it is ok to issue a reset. */
4297 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4299 /* Deposit a driver reset signature so the firmware knows that
4300 * this is a soft reset. */
4301 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4302 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4304 /* Do a dummy read to force the chip to complete all current transactions
4305 * before we issue a reset. */
4306 val = REG_RD(bp, BNX2_MISC_ID);
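4307 /* The 5709 is reset through BNX2_MISC_COMMAND; older chips use the core-reset bits in PCICFG_MISC_CONFIG. */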
4308 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4309 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4310 REG_RD(bp, BNX2_MISC_COMMAND);
4313 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4314 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4316 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4319 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4320 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4321 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4324 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4326 /* Reading back any register after chip reset will hang the
4327 * bus on 5706 A0 and A1. The msleep below provides plenty
4328 * of margin for write posting.
4330 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4331 (CHIP_ID(bp) == CHIP_ID_5706_A1))
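4332 msleep(20);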
4334 /* Reset takes approximately 30 usec */
4335 for (i = 0; i < 10; i++) {
4336 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4337 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4338 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4343 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4344 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4345 printk(KERN_ERR PFX "Chip reset did not complete\n");
4350 /* Make sure byte swapping is properly configured. */
4351 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4352 if (val != 0x01020304) {
4353 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4357 /* Wait for the firmware to finish its initialization. */
4358 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4362 spin_lock_bh(&bp->phy_lock);
4363 old_port = bp->phy_port;
4364 bnx2_init_remote_phy(bp);
4365 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4366 old_port != bp->phy_port)
4367 bnx2_set_default_remote_link(bp);
4368 spin_unlock_bh(&bp->phy_lock);
4370 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4371 /* Adjust the voltage regulator two steps lower. The default
4372 * of this register is 0x0000000e. */
4373 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4375 /* Remove bad rbuf memory from the free pool. */
4376 rc = bnx2_alloc_bad_rbuf(bp);
4379 if (bp->flags & BNX2_FLAG_USING_MSIX)
4380 bnx2_setup_msix_tbl(bp);
4386 bnx2_init_chip(struct bnx2 *bp)
4391 /* Make sure the interrupt is not active. */
4392 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4394 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4395 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
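4396 #ifdef __BIG_ENDIAN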
4397 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
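4398 #endif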
4399 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4400 DMA_READ_CHANS << 12 |
4401 DMA_WRITE_CHANS << 16;
4403 val |= (0x2 << 20) | (1 << 11);
4405 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
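4406 val |= (1 << 23);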
4408 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4409 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4410 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4412 REG_WR(bp, BNX2_DMA_CONFIG, val);
4414 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4415 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4416 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4417 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4420 if (bp->flags & BNX2_FLAG_PCIX) {
4423 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4425 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4426 val16 & ~PCI_X_CMD_ERO);
4429 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4430 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4431 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4432 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4434 /* Initialize context mapping and zero out the quick contexts. The
4435 * context block must have already been enabled. */
4436 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4437 rc = bnx2_init_5709_context(bp);
4441 bnx2_init_context(bp);
4443 if ((rc = bnx2_init_cpus(bp)) != 0)
4446 bnx2_init_nvram(bp);
4448 bnx2_set_mac_addr(bp);
4450 val = REG_RD(bp, BNX2_MQ_CONFIG);
4451 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4452 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4453 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4454 val |= BNX2_MQ_CONFIG_HALT_DIS;
4456 REG_WR(bp, BNX2_MQ_CONFIG, val);
4458 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4459 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4460 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4462 val = (BCM_PAGE_BITS - 8) << 24;
4463 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4465 /* Configure page size. */
4466 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4467 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4468 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4469 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4471 val = bp->mac_addr[0] +
4472 (bp->mac_addr[1] << 8) +
4473 (bp->mac_addr[2] << 16) +
4475 (bp->mac_addr[4] << 8) +
4476 (bp->mac_addr[5] << 16);
4477 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4479 /* Program the MTU. Also include 4 bytes for CRC32. */
4480 val = bp->dev->mtu + ETH_HLEN + 4;
4481 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4482 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4483 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4485 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4486 bp->bnx2_napi[i].last_status_idx = 0;
4488 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4490 /* Set up how to generate a link change interrupt. */
4491 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4493 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4494 (u64) bp->status_blk_mapping & 0xffffffff);
4495 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4497 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4498 (u64) bp->stats_blk_mapping & 0xffffffff);
4499 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4500 (u64) bp->stats_blk_mapping >> 32);
4502 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4503 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4505 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4506 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4508 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4509 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4511 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4513 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4515 REG_WR(bp, BNX2_HC_COM_TICKS,
4516 (bp->com_ticks_int << 16) | bp->com_ticks);
4518 REG_WR(bp, BNX2_HC_CMD_TICKS,
4519 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4521 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4522 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4524 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4525 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4527 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4528 val = BNX2_HC_CONFIG_COLLECT_STATS;
4530 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4531 BNX2_HC_CONFIG_COLLECT_STATS;
4534 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4535 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4536 BNX2_HC_SB_CONFIG_1;
4538 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4539 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4542 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4543 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4545 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4546 (bp->tx_quick_cons_trip_int << 16) |
4547 bp->tx_quick_cons_trip);
4549 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4550 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4552 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4555 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4556 val |= BNX2_HC_CONFIG_ONE_SHOT;
4558 REG_WR(bp, BNX2_HC_CONFIG, val);
4560 /* Clear internal stats counters. */
4561 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4563 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4565 /* Initialize the receive filter. */
4566 bnx2_set_rx_mode(bp->dev);
4568 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4569 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4570 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4571 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4573 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4576 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4577 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4581 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4587 bnx2_clear_ring_states(struct bnx2 *bp)
4589 struct bnx2_napi *bnapi;
4590 struct bnx2_tx_ring_info *txr;
4591 struct bnx2_rx_ring_info *rxr;
4594 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4595 bnapi = &bp->bnx2_napi[i];
4596 txr = &bnapi->tx_ring;
4597 rxr = &bnapi->rx_ring;
4600 txr->hw_tx_cons = 0;
4601 rxr->rx_prod_bseq = 0;
4604 rxr->rx_pg_prod = 0;
4605 rxr->rx_pg_cons = 0;
4610 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4612 u32 val, offset0, offset1, offset2, offset3;
4613 u32 cid_addr = GET_CID_ADDR(cid);
4615 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4616 offset0 = BNX2_L2CTX_TYPE_XI;
4617 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4618 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4619 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4621 offset0 = BNX2_L2CTX_TYPE;
4622 offset1 = BNX2_L2CTX_CMD_TYPE;
4623 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4624 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4626 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4627 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4629 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4630 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4632 val = (u64) txr->tx_desc_mapping >> 32;
4633 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4635 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4636 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4640 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4644 struct bnx2_napi *bnapi;
4645 struct bnx2_tx_ring_info *txr;
4647 bnapi = &bp->bnx2_napi[ring_num];
4648 txr = &bnapi->tx_ring;
4653 cid = TX_TSS_CID + ring_num - 1;
4655 bp->tx_wake_thresh = bp->tx_ring_size / 2;
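4656 /* The slot past the last usable BD is a chain BD pointing back to the start of the ring. */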
4657 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4659 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4660 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4663 txr->tx_prod_bseq = 0;
4665 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4666 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4668 bnx2_init_tx_context(bp, cid, txr);
4672 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4678 for (i = 0; i < num_rings; i++) {
4681 rxbd = &rx_ring[i][0];
4682 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4683 rxbd->rx_bd_len = buf_size;
4684 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4686 if (i == (num_rings - 1))
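4687 j = 0;
4688 else
4689 j = i + 1;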
4690 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4691 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4696 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4699 u16 prod, ring_prod;
4700 u32 cid, rx_cid_addr, val;
4701 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4702 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4707 cid = RX_RSS_CID + ring_num - 1;
4709 rx_cid_addr = GET_CID_ADDR(cid);
4711 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4712 bp->rx_buf_use_size, bp->rx_max_ring);
4714 bnx2_init_rx_context(bp, cid);
4716 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4717 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4718 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4721 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4722 if (bp->rx_pg_ring_size) {
4723 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4724 rxr->rx_pg_desc_mapping,
4725 PAGE_SIZE, bp->rx_max_pg_ring);
4726 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4727 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4728 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4729 BNX2_L2CTX_RBDC_JUMBO_KEY);
4731 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4732 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4734 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4735 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4737 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4738 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4741 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4742 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4744 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4745 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4747 ring_prod = prod = rxr->rx_pg_prod;
4748 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4749 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4751 prod = NEXT_RX_BD(prod);
4752 ring_prod = RX_PG_RING_IDX(prod);
4754 rxr->rx_pg_prod = prod;
4756 ring_prod = prod = rxr->rx_prod;
4757 for (i = 0; i < bp->rx_ring_size; i++) {
4758 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4760 prod = NEXT_RX_BD(prod);
4761 ring_prod = RX_RING_IDX(prod);
4763 rxr->rx_prod = prod;
4765 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4766 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4767 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4769 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4770 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4772 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4776 bnx2_init_all_rings(struct bnx2 *bp)
4780 bnx2_clear_ring_states(bp);
4782 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4783 for (i = 0; i < bp->num_tx_rings; i++)
4784 bnx2_init_tx_ring(bp, i);
4786 if (bp->num_tx_rings > 1)
4787 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4790 for (i = 0; i < bp->num_rx_rings; i++)
4791 bnx2_init_rx_ring(bp, i);
4794 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4796 u32 max, num_rings = 1;
4798 while (ring_size > MAX_RX_DESC_CNT) {
4799 ring_size -= MAX_RX_DESC_CNT;
4802 /* round to next power of 2 */
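4803 max = max_size;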
4804 while ((max & num_rings) == 0)
4807 if (num_rings != max)
4814 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4816 u32 rx_size, rx_space, jumbo_size;
4818 /* 8 for CRC and VLAN */
4819 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4821 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4822 sizeof(struct skb_shared_info);
4824 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4825 bp->rx_pg_ring_size = 0;
4826 bp->rx_max_pg_ring = 0;
4827 bp->rx_max_pg_ring_idx = 0;
4828 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4829 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4831 jumbo_size = size * pages;
4832 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4833 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4835 bp->rx_pg_ring_size = jumbo_size;
4836 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4838 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4839 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4840 bp->rx_copy_thresh = 0;
4843 bp->rx_buf_use_size = rx_size;
4845 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4846 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4847 bp->rx_ring_size = size;
4848 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4849 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4853 bnx2_free_tx_skbs(struct bnx2 *bp)
4857 for (i = 0; i < bp->num_tx_rings; i++) {
4858 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4859 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4862 if (txr->tx_buf_ring == NULL)
4865 for (j = 0; j < TX_DESC_CNT; ) {
4866 struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4867 struct sk_buff *skb = tx_buf->skb;
4875 pci_unmap_single(bp->pdev,
4876 pci_unmap_addr(tx_buf, mapping),
4877 skb_headlen(skb), PCI_DMA_TODEVICE);
4881 last = skb_shinfo(skb)->nr_frags;
4882 for (k = 0; k < last; k++) {
4883 tx_buf = &txr->tx_buf_ring[j + k + 1];
4884 pci_unmap_page(bp->pdev,
4885 pci_unmap_addr(tx_buf, mapping),
4886 skb_shinfo(skb)->frags[k].size,
4896 bnx2_free_rx_skbs(struct bnx2 *bp)
4900 for (i = 0; i < bp->num_rx_rings; i++) {
4901 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4902 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4905 if (rxr->rx_buf_ring == NULL)
4908 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4909 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4910 struct sk_buff *skb = rx_buf->skb;
4915 pci_unmap_single(bp->pdev,
4916 pci_unmap_addr(rx_buf, mapping),
4917 bp->rx_buf_use_size,
4918 PCI_DMA_FROMDEVICE);
4924 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4925 bnx2_free_rx_page(bp, rxr, j);
4930 bnx2_free_skbs(struct bnx2 *bp)
4932 bnx2_free_tx_skbs(bp);
4933 bnx2_free_rx_skbs(bp);
4937 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4941 rc = bnx2_reset_chip(bp, reset_code);
4946 if ((rc = bnx2_init_chip(bp)) != 0)
4949 bnx2_init_all_rings(bp);
4954 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4958 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4961 spin_lock_bh(&bp->phy_lock);
4962 bnx2_init_phy(bp, reset_phy);
4964 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4965 bnx2_remote_phy_event(bp);
4966 spin_unlock_bh(&bp->phy_lock);
4971 bnx2_test_registers(struct bnx2 *bp)
4975 static const struct {
4978 #define BNX2_FL_NOT_5709 1
4982 { 0x006c, 0, 0x00000000, 0x0000003f },
4983 { 0x0090, 0, 0xffffffff, 0x00000000 },
4984 { 0x0094, 0, 0x00000000, 0x00000000 },
4986 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4987 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4988 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4989 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4990 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4991 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4992 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4993 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4994 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4996 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4997 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4998 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4999 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5000 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5001 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5003 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5004 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5005 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5007 { 0x1000, 0, 0x00000000, 0x00000001 },
5008 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5010 { 0x1408, 0, 0x01c00800, 0x00000000 },
5011 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5012 { 0x14a8, 0, 0x00000000, 0x000001ff },
5013 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5014 { 0x14b0, 0, 0x00000002, 0x00000001 },
5015 { 0x14b8, 0, 0x00000000, 0x00000000 },
5016 { 0x14c0, 0, 0x00000000, 0x00000009 },
5017 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5018 { 0x14cc, 0, 0x00000000, 0x00000001 },
5019 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5021 { 0x1800, 0, 0x00000000, 0x00000001 },
5022 { 0x1804, 0, 0x00000000, 0x00000003 },
5024 { 0x2800, 0, 0x00000000, 0x00000001 },
5025 { 0x2804, 0, 0x00000000, 0x00003f01 },
5026 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5027 { 0x2810, 0, 0xffff0000, 0x00000000 },
5028 { 0x2814, 0, 0xffff0000, 0x00000000 },
5029 { 0x2818, 0, 0xffff0000, 0x00000000 },
5030 { 0x281c, 0, 0xffff0000, 0x00000000 },
5031 { 0x2834, 0, 0xffffffff, 0x00000000 },
5032 { 0x2840, 0, 0x00000000, 0xffffffff },
5033 { 0x2844, 0, 0x00000000, 0xffffffff },
5034 { 0x2848, 0, 0xffffffff, 0x00000000 },
5035 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5037 { 0x2c00, 0, 0x00000000, 0x00000011 },
5038 { 0x2c04, 0, 0x00000000, 0x00030007 },
5040 { 0x3c00, 0, 0x00000000, 0x00000001 },
5041 { 0x3c04, 0, 0x00000000, 0x00070000 },
5042 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5043 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5044 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5045 { 0x3c14, 0, 0x00000000, 0xffffffff },
5046 { 0x3c18, 0, 0x00000000, 0xffffffff },
5047 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5048 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5050 { 0x5004, 0, 0x00000000, 0x0000007f },
5051 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5053 { 0x5c00, 0, 0x00000000, 0x00000001 },
5054 { 0x5c04, 0, 0x00000000, 0x0003000f },
5055 { 0x5c08, 0, 0x00000003, 0x00000000 },
5056 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5057 { 0x5c10, 0, 0x00000000, 0xffffffff },
5058 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5059 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5060 { 0x5c88, 0, 0x00000000, 0x00077373 },
5061 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5063 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5064 { 0x680c, 0, 0xffffffff, 0x00000000 },
5065 { 0x6810, 0, 0xffffffff, 0x00000000 },
5066 { 0x6814, 0, 0xffffffff, 0x00000000 },
5067 { 0x6818, 0, 0xffffffff, 0x00000000 },
5068 { 0x681c, 0, 0xffffffff, 0x00000000 },
5069 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5070 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5071 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5072 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5073 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5074 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5075 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5076 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5077 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5078 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5079 { 0x684c, 0, 0xffffffff, 0x00000000 },
5080 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5081 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5082 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5083 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5084 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5085 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5087 { 0xffff, 0, 0x00000000, 0x00000000 },
5092 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5093 is_5709 = 1;
5095 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5096 u32 offset, rw_mask, ro_mask, save_val, val;
5097 u16 flags = reg_tbl[i].flags;
5099 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5102 offset = (u32) reg_tbl[i].offset;
5103 rw_mask = reg_tbl[i].rw_mask;
5104 ro_mask = reg_tbl[i].ro_mask;
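/* Probe the register twice: after writing all-zeros the r/w bits
 * must read back 0, and after writing all-ones they must read back
 * 1, while the read-only bits keep their saved value both times.
 * The original value is restored afterwards.
 */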
5106 save_val = readl(bp->regview + offset);
5108 writel(0, bp->regview + offset);
5110 val = readl(bp->regview + offset);
5111 if ((val & rw_mask) != 0) {
5112 goto reg_test_err;
5115 if ((val & ro_mask) != (save_val & ro_mask)) {
5116 goto reg_test_err;
5119 writel(0xffffffff, bp->regview + offset);
5121 val = readl(bp->regview + offset);
5122 if ((val & rw_mask) != rw_mask) {
5123 goto reg_test_err;
5126 if ((val & ro_mask) != (save_val & ro_mask)) {
5127 goto reg_test_err;
5130 writel(save_val, bp->regview + offset);
5131 continue;
5133 reg_test_err:
5134 writel(save_val, bp->regview + offset);
5135 ret = -ENODEV;
5136 break;
5137 }
5139 return ret;
5142 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5144 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5145 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5148 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5151 for (offset = 0; offset < size; offset += 4) {
5153 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5155 if (bnx2_reg_rd_ind(bp, start + offset) !=
5156 test_pattern[i]) {
5157 return -ENODEV;
5158 }
5162 return 0;
5165 bnx2_test_memory(struct bnx2 *bp)
5169 static struct mem_entry {
5172 } mem_tbl_5706[] = {
5173 { 0x60000, 0x4000 },
5174 { 0xa0000, 0x3000 },
5175 { 0xe0000, 0x4000 },
5176 { 0x120000, 0x4000 },
5177 { 0x1a0000, 0x4000 },
5178 { 0x160000, 0x4000 },
5179 { 0xffffffff, 0 },
5180 },
5181 mem_tbl_5709[] = {
5182 { 0x60000, 0x4000 },
5183 { 0xa0000, 0x3000 },
5184 { 0xe0000, 0x4000 },
5185 { 0x120000, 0x4000 },
5186 { 0x1a0000, 0x4000 },
5187 { 0xffffffff, 0 },
5188 };
5189 struct mem_entry *mem_tbl;
5191 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5192 mem_tbl = mem_tbl_5709;
5194 mem_tbl = mem_tbl_5706;
5196 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5197 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5198 mem_tbl[i].len)) != 0) {
5199 return ret;
5200 }
5201 }
5203 return ret;
5206 #define BNX2_MAC_LOOPBACK 0
5207 #define BNX2_PHY_LOOPBACK 1
5210 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5212 unsigned int pkt_size, num_pkts, i;
5213 struct sk_buff *skb, *rx_skb;
5214 unsigned char *packet;
5215 u16 rx_start_idx, rx_idx;
5218 struct sw_bd *rx_buf;
5219 struct l2_fhdr *rx_hdr;
5220 int ret = -ENODEV;
5221 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5222 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5223 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5225 tx_napi = bnapi;
5227 txr = &tx_napi->tx_ring;
5228 rxr = &bnapi->rx_ring;
5229 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5230 bp->loopback = MAC_LOOPBACK;
5231 bnx2_set_mac_loopback(bp);
5233 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5234 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5235 return 0;
5237 bp->loopback = PHY_LOOPBACK;
5238 bnx2_set_phy_loopback(bp);
5240 else
5241 return -EINVAL;
5243 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5244 skb = netdev_alloc_skb(bp->dev, pkt_size);
5245 if (!skb)
5246 return -ENOMEM;
5247 packet = skb_put(skb, pkt_size);
5248 memcpy(packet, bp->dev->dev_addr, 6);
5249 memset(packet + 6, 0x0, 8);
5250 for (i = 14; i < pkt_size; i++)
5251 packet[i] = (unsigned char) (i & 0xff);
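/* The test frame is our own MAC address, 8 bytes of zeros, then an
 * incrementing byte pattern that is verified byte-for-byte when the
 * frame comes back through the loopback path.
 */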
5253 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5254 PCI_DMA_TODEVICE);
5256 REG_WR(bp, BNX2_HC_COMMAND,
5257 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5259 REG_RD(bp, BNX2_HC_COMMAND);
5262 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5264 num_pkts = 0;
5266 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5268 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5269 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5270 txbd->tx_bd_mss_nbytes = pkt_size;
5271 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5273 num_pkts++;
5274 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5275 txr->tx_prod_bseq += pkt_size;
5277 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5278 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5282 REG_WR(bp, BNX2_HC_COMMAND,
5283 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5285 REG_RD(bp, BNX2_HC_COMMAND);
5289 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5290 dev_kfree_skb(skb);
5292 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5293 goto loopback_test_done;
5295 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5296 if (rx_idx != rx_start_idx + num_pkts) {
5297 goto loopback_test_done;
5300 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5301 rx_skb = rx_buf->skb;
5303 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5304 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5306 pci_dma_sync_single_for_cpu(bp->pdev,
5307 pci_unmap_addr(rx_buf, mapping),
5308 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5310 if (rx_hdr->l2_fhdr_status &
5311 (L2_FHDR_ERRORS_BAD_CRC |
5312 L2_FHDR_ERRORS_PHY_DECODE |
5313 L2_FHDR_ERRORS_ALIGNMENT |
5314 L2_FHDR_ERRORS_TOO_SHORT |
5315 L2_FHDR_ERRORS_GIANT_FRAME)) {
5317 goto loopback_test_done;
5320 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5321 goto loopback_test_done;
5324 for (i = 14; i < pkt_size; i++) {
5325 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5326 goto loopback_test_done;
5327 }
5328 }
5330 ret = 0;
5332 loopback_test_done:
5333 bp->loopback = 0;
5334 return ret;
5337 #define BNX2_MAC_LOOPBACK_FAILED 1
5338 #define BNX2_PHY_LOOPBACK_FAILED 2
5339 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5340 BNX2_PHY_LOOPBACK_FAILED)
5343 bnx2_test_loopback(struct bnx2 *bp)
5345 int rc = 0;
5347 if (!netif_running(bp->dev))
5348 return BNX2_LOOPBACK_FAILED;
5350 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5351 spin_lock_bh(&bp->phy_lock);
5352 bnx2_init_phy(bp, 1);
5353 spin_unlock_bh(&bp->phy_lock);
5354 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5355 rc |= BNX2_MAC_LOOPBACK_FAILED;
5356 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5357 rc |= BNX2_PHY_LOOPBACK_FAILED;
5359 return rc;
5361 #define NVRAM_SIZE 0x200
5362 #define CRC32_RESIDUAL 0xdebb20e3
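/* Running CRC-32 over a block that includes its trailing CRC yields
 * this constant residual, so each 0x100-byte half of the NVRAM block
 * can be validated without extracting the stored checksum.
 */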
5365 bnx2_test_nvram(struct bnx2 *bp)
5367 __be32 buf[NVRAM_SIZE / 4];
5368 u8 *data = (u8 *) buf;
5369 int rc = 0;
5370 u32 magic, csum;
5372 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5373 goto test_nvram_done;
5375 magic = be32_to_cpu(buf[0]);
5376 if (magic != 0x669955aa) {
5377 rc = -ENODEV;
5378 goto test_nvram_done;
5381 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5382 goto test_nvram_done;
5384 csum = ether_crc_le(0x100, data);
5385 if (csum != CRC32_RESIDUAL) {
5386 rc = -ENODEV;
5387 goto test_nvram_done;
5390 csum = ether_crc_le(0x100, data + 0x100);
5391 if (csum != CRC32_RESIDUAL) {
5392 rc = -ENODEV;
5395 test_nvram_done:
5396 return rc;
5400 bnx2_test_link(struct bnx2 *bp)
5402 u32 bmsr;
5404 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5405 if (bp->link_up)
5406 return 0;
5407 return -ENODEV;
5408 }
5409 spin_lock_bh(&bp->phy_lock);
5410 bnx2_enable_bmsr1(bp);
5411 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5412 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5413 bnx2_disable_bmsr1(bp);
5414 spin_unlock_bh(&bp->phy_lock);
5416 if (bmsr & BMSR_LSTATUS) {
5417 return 0;
5418 }
5419 return -ENODEV;
5423 bnx2_test_intr(struct bnx2 *bp)
5425 int i;
5426 u16 status_idx;
5428 if (!netif_running(bp->dev))
5429 return -ENODEV;
5431 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
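/* Latch the current status-block index, force a coalescing event
 * below, and then poll for the index to advance; if it never
 * changes, no interrupt was delivered.
 */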
5433 /* This register is not touched during run-time. */
5434 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5435 REG_RD(bp, BNX2_HC_COMMAND);
5437 for (i = 0; i < 10; i++) {
5438 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5439 status_idx)
5441 break;
5444 msleep_interruptible(10);
5446 if (i < 10)
5447 return 0;
5449 return -ENODEV;
5452 /* Determining link for parallel detection. */
5454 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5456 u32 mode_ctl, an_dbg, exp;
5458 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5461 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5462 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5464 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5467 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5468 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5469 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5471 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5474 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5475 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5476 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5478 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5485 bnx2_5706_serdes_timer(struct bnx2 *bp)
5489 spin_lock(&bp->phy_lock);
5490 if (bp->serdes_an_pending) {
5491 bp->serdes_an_pending--;
5493 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5494 u32 bmcr;
5496 bp->current_interval = bp->timer_interval;
5498 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5500 if (bmcr & BMCR_ANENABLE) {
5501 if (bnx2_5706_serdes_has_link(bp)) {
5502 bmcr &= ~BMCR_ANENABLE;
5503 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5504 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5505 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5509 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5510 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5511 u32 phy2;
5513 bnx2_write_phy(bp, 0x17, 0x0f01);
5514 bnx2_read_phy(bp, 0x15, &phy2);
5515 if (phy2 & 0x20) {
5516 u32 bmcr;
5518 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5519 bmcr |= BMCR_ANENABLE;
5520 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5522 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5525 bp->current_interval = bp->timer_interval;
5530 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5531 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5532 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5534 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5535 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5536 bnx2_5706s_force_link_dn(bp, 1);
5537 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5540 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5543 spin_unlock(&bp->phy_lock);
5547 bnx2_5708_serdes_timer(struct bnx2 *bp)
5549 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5552 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5553 bp->serdes_an_pending = 0;
5557 spin_lock(&bp->phy_lock);
5558 if (bp->serdes_an_pending)
5559 bp->serdes_an_pending--;
5560 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5563 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5564 if (bmcr & BMCR_ANENABLE) {
5565 bnx2_enable_forced_2g5(bp);
5566 bp->current_interval = SERDES_FORCED_TIMEOUT;
5568 bnx2_disable_forced_2g5(bp);
5569 bp->serdes_an_pending = 2;
5570 bp->current_interval = bp->timer_interval;
5574 bp->current_interval = bp->timer_interval;
5576 spin_unlock(&bp->phy_lock);
5580 bnx2_timer(unsigned long data)
5582 struct bnx2 *bp = (struct bnx2 *) data;
5584 if (!netif_running(bp->dev))
5585 return;
5587 if (atomic_read(&bp->intr_sem) != 0)
5588 goto bnx2_restart_timer;
5590 bnx2_send_heart_beat(bp);
5592 bp->stats_blk->stat_FwRxDrop =
5593 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5595 /* workaround occasional corrupted counters */
5596 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5597 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5598 BNX2_HC_COMMAND_STATS_NOW);
5600 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5601 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5602 bnx2_5706_serdes_timer(bp);
5604 bnx2_5708_serdes_timer(bp);
5608 mod_timer(&bp->timer, jiffies + bp->current_interval);
5612 bnx2_request_irq(struct bnx2 *bp)
5614 struct net_device *dev = bp->dev;
5615 unsigned long flags;
5616 struct bnx2_irq *irq;
5617 int rc = 0, i;
5619 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5620 flags = 0;
5621 else
5622 flags = IRQF_SHARED;
5624 for (i = 0; i < bp->irq_nvecs; i++) {
5625 irq = &bp->irq_tbl[i];
5626 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5627 dev);
5628 if (rc)
5629 break;
5630 irq->requested = 1;
5633 return rc;
5636 bnx2_free_irq(struct bnx2 *bp)
5638 struct net_device *dev = bp->dev;
5639 struct bnx2_irq *irq;
5642 for (i = 0; i < bp->irq_nvecs; i++) {
5643 irq = &bp->irq_tbl[i];
5645 free_irq(irq->vector, dev);
5648 if (bp->flags & BNX2_FLAG_USING_MSI)
5649 pci_disable_msi(bp->pdev);
5650 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5651 pci_disable_msix(bp->pdev);
5653 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5657 bnx2_enable_msix(struct bnx2 *bp)
5659 int i, rc;
5660 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5662 bnx2_setup_msix_tbl(bp);
5663 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5664 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5665 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
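/* The MSI-X table and pending-bit array are exposed through GRC
 * windows 2 and 3 so they remain reachable through the regular
 * register BAR.
 */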
5667 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5668 msix_ent[i].entry = i;
5669 msix_ent[i].vector = 0;
5671 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5672 if (i == 0)
5673 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5674 else
5675 bp->irq_tbl[i].handler = bnx2_tx_msix;
5678 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5679 if (rc != 0)
5680 return;
5682 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5683 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5684 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5685 bp->irq_tbl[i].vector = msix_ent[i].vector;
5689 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5691 bp->irq_tbl[0].handler = bnx2_interrupt;
5692 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5693 bp->irq_nvecs = 1;
5694 bp->irq_tbl[0].vector = bp->pdev->irq;
5696 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5697 bnx2_enable_msix(bp);
5699 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5700 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5701 if (pci_enable_msi(bp->pdev) == 0) {
5702 bp->flags |= BNX2_FLAG_USING_MSI;
5703 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5704 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5705 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5707 bp->irq_tbl[0].handler = bnx2_msi;
5709 bp->irq_tbl[0].vector = bp->pdev->irq;
5712 bp->num_tx_rings = 1;
5713 bp->num_rx_rings = 1;
5716 /* Called with rtnl_lock */
5718 bnx2_open(struct net_device *dev)
5720 struct bnx2 *bp = netdev_priv(dev);
5721 int rc;
5723 netif_carrier_off(dev);
5725 bnx2_set_power_state(bp, PCI_D0);
5726 bnx2_disable_int(bp);
5728 bnx2_setup_int_mode(bp, disable_msi);
5729 bnx2_napi_enable(bp);
5730 rc = bnx2_alloc_mem(bp);
5731 if (rc) {
5732 bnx2_napi_disable(bp);
5733 bnx2_free_mem(bp);
5734 return rc;
5735 }
5737 rc = bnx2_request_irq(bp);
5739 if (rc) {
5740 bnx2_napi_disable(bp);
5741 bnx2_free_mem(bp);
5742 return rc;
5743 }
5745 rc = bnx2_init_nic(bp, 1);
5747 if (rc) {
5748 bnx2_napi_disable(bp);
5749 bnx2_free_irq(bp);
5750 bnx2_free_skbs(bp);
5751 bnx2_free_mem(bp);
5752 return rc;
5753 }
5755 mod_timer(&bp->timer, jiffies + bp->current_interval);
5757 atomic_set(&bp->intr_sem, 0);
5759 bnx2_enable_int(bp);
5761 if (bp->flags & BNX2_FLAG_USING_MSI) {
5762 /* Test MSI to make sure it is working
5763 * If MSI test fails, go back to INTx mode
5765 if (bnx2_test_intr(bp) != 0) {
5766 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5767 " using MSI, switching to INTx mode. Please"
5768 " report this failure to the PCI maintainer"
5769 " and include system chipset information.\n",
5770 bp->dev->name);
5772 bnx2_disable_int(bp);
5775 bnx2_setup_int_mode(bp, 1);
5777 rc = bnx2_init_nic(bp, 0);
5779 if (!rc)
5780 rc = bnx2_request_irq(bp);
5782 if (rc) {
5783 bnx2_napi_disable(bp);
5784 bnx2_free_skbs(bp);
5785 bnx2_free_mem(bp);
5786 del_timer_sync(&bp->timer);
5787 return rc;
5788 }
5789 bnx2_enable_int(bp);
5792 if (bp->flags & BNX2_FLAG_USING_MSI)
5793 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5794 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5795 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5797 netif_start_queue(dev);
5799 return 0;
5803 bnx2_reset_task(struct work_struct *work)
5805 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5807 if (!netif_running(bp->dev))
5808 return;
5810 bnx2_netif_stop(bp);
5812 bnx2_init_nic(bp, 1);
5814 atomic_set(&bp->intr_sem, 1);
5815 bnx2_netif_start(bp);
5819 bnx2_tx_timeout(struct net_device *dev)
5821 struct bnx2 *bp = netdev_priv(dev);
5823 /* This allows the netif to be shutdown gracefully before resetting */
5824 schedule_work(&bp->reset_task);
5828 /* Called with rtnl_lock */
5830 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5832 struct bnx2 *bp = netdev_priv(dev);
5834 bnx2_netif_stop(bp);
5836 bp->vlgrp = vlgrp;
5837 bnx2_set_rx_mode(dev);
5839 bnx2_netif_start(bp);
5843 /* Called with netif_tx_lock.
5844 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5845 * netif_wake_queue().
5848 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5850 struct bnx2 *bp = netdev_priv(dev);
5853 struct sw_bd *tx_buf;
5854 u32 len, vlan_tag_flags, last_frag, mss;
5855 u16 prod, ring_prod;
5857 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
5858 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5860 if (unlikely(bnx2_tx_avail(bp, txr) <
5861 (skb_shinfo(skb)->nr_frags + 1))) {
5862 netif_stop_queue(dev);
5863 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5864 dev->name);
5866 return NETDEV_TX_BUSY;
5868 len = skb_headlen(skb);
5869 prod = txr->tx_prod;
5870 ring_prod = TX_RING_IDX(prod);
5872 vlan_tag_flags = 0;
5873 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5874 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5877 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5878 vlan_tag_flags |=
5879 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5880 }
5881 if ((mss = skb_shinfo(skb)->gso_size)) {
5882 u32 tcp_opt_len, ip_tcp_len;
5883 struct iphdr *iph;
5885 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5887 tcp_opt_len = tcp_optlen(skb);
5889 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5890 u32 tcp_off = skb_transport_offset(skb) -
5891 sizeof(struct ipv6hdr) - ETH_HLEN;
5893 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5894 TX_BD_FLAGS_SW_FLAGS;
5895 if (likely(tcp_off == 0))
5896 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5897 else {
5898 tcp_off >>= 3;
5899 vlan_tag_flags |= ((tcp_off & 0x3) <<
5900 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5901 ((tcp_off & 0x10) <<
5902 TX_BD_FLAGS_TCP6_OFF4_SHL);
5903 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5905 } else {
5906 if (skb_header_cloned(skb) &&
5907 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5908 dev_kfree_skb(skb);
5909 return NETDEV_TX_OK;
5912 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5914 iph = ip_hdr(skb);
5916 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5917 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5918 iph->daddr, 0, IPPROTO_TCP, 0);
5921 if (tcp_opt_len || (iph->ihl > 5)) {
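/* Bits 8+ of the BD flags carry the extra header length in 32-bit
 * words. E.g. (illustrative) a 20-byte IP header (ihl 5) with 12
 * bytes of TCP options yields (5 - 5) + (12 >> 2) = 3 extra words.
 */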
5922 vlan_tag_flags |= ((iph->ihl - 5) +
5923 (tcp_opt_len >> 2)) << 8;
5927 } else
5928 mss = 0;
5929 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5931 tx_buf = &txr->tx_buf_ring[ring_prod];
5932 tx_buf->skb = skb;
5933 pci_unmap_addr_set(tx_buf, mapping, mapping);
5935 txbd = &txr->tx_desc_ring[ring_prod];
5937 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5938 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5939 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5940 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5942 last_frag = skb_shinfo(skb)->nr_frags;
5944 for (i = 0; i < last_frag; i++) {
5945 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5947 prod = NEXT_TX_BD(prod);
5948 ring_prod = TX_RING_IDX(prod);
5949 txbd = &txr->tx_desc_ring[ring_prod];
5951 len = frag->size;
5952 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5953 len, PCI_DMA_TODEVICE);
5954 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
5955 mapping, mapping);
5957 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5958 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5959 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5960 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5963 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
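/* The first BD of the packet was marked with TX_BD_FLAGS_START
 * above; tagging the last BD with TX_BD_FLAGS_END lets the chip
 * delimit multi-BD packets.
 */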
5965 prod = NEXT_TX_BD(prod);
5966 txr->tx_prod_bseq += skb->len;
5968 REG_WR16(bp, txr->tx_bidx_addr, prod);
5969 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5971 mmiowb();
5973 txr->tx_prod = prod;
5974 dev->trans_start = jiffies;
5976 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
5977 netif_stop_queue(dev);
5978 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
5979 netif_wake_queue(dev);
5982 return NETDEV_TX_OK;
5985 /* Called with rtnl_lock */
5987 bnx2_close(struct net_device *dev)
5989 struct bnx2 *bp = netdev_priv(dev);
5990 u32 reset_code;
5992 cancel_work_sync(&bp->reset_task);
5994 bnx2_disable_int_sync(bp);
5995 bnx2_napi_disable(bp);
5996 del_timer_sync(&bp->timer);
5997 if (bp->flags & BNX2_FLAG_NO_WOL)
5998 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5999 else if (bp->wol)
6000 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6001 else
6002 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6003 bnx2_reset_chip(bp, reset_code);
6004 bnx2_free_irq(bp);
6005 bnx2_free_skbs(bp);
6006 bnx2_free_mem(bp);
6007 bp->link_up = 0;
6008 netif_carrier_off(bp->dev);
6009 bnx2_set_power_state(bp, PCI_D3hot);
6010 return 0;
6013 #define GET_NET_STATS64(ctr) \
6014 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6015 (unsigned long) (ctr##_lo)
6017 #define GET_NET_STATS32(ctr) \
6018 (ctr##_lo)
6020 #if (BITS_PER_LONG == 64)
6021 #define GET_NET_STATS GET_NET_STATS64
6022 #else
6023 #define GET_NET_STATS GET_NET_STATS32
6024 #endif
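/* On 64-bit hosts both 32-bit halves of a counter are combined; on
 * 32-bit hosts only the low word is reported, so very large counters
 * can wrap in the unsigned long result.
 */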
6026 static struct net_device_stats *
6027 bnx2_get_stats(struct net_device *dev)
6029 struct bnx2 *bp = netdev_priv(dev);
6030 struct statistics_block *stats_blk = bp->stats_blk;
6031 struct net_device_stats *net_stats = &bp->net_stats;
6033 if (bp->stats_blk == NULL) {
6034 return net_stats;
6036 net_stats->rx_packets =
6037 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6038 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6039 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6041 net_stats->tx_packets =
6042 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6043 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6044 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6046 net_stats->rx_bytes =
6047 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6049 net_stats->tx_bytes =
6050 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6052 net_stats->multicast =
6053 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6055 net_stats->collisions =
6056 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6058 net_stats->rx_length_errors =
6059 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6060 stats_blk->stat_EtherStatsOverrsizePkts);
6062 net_stats->rx_over_errors =
6063 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6065 net_stats->rx_frame_errors =
6066 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6068 net_stats->rx_crc_errors =
6069 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6071 net_stats->rx_errors = net_stats->rx_length_errors +
6072 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6073 net_stats->rx_crc_errors;
6075 net_stats->tx_aborted_errors =
6076 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6077 stats_blk->stat_Dot3StatsLateCollisions);
6079 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6080 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6081 net_stats->tx_carrier_errors = 0;
6083 net_stats->tx_carrier_errors =
6085 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6088 net_stats->tx_errors =
6090 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6091 +
6092 net_stats->tx_aborted_errors +
6093 net_stats->tx_carrier_errors;
6095 net_stats->rx_missed_errors =
6096 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6097 stats_blk->stat_FwRxDrop);
6099 return net_stats;
6102 /* All ethtool functions called with rtnl_lock */
6105 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6107 struct bnx2 *bp = netdev_priv(dev);
6108 int support_serdes = 0, support_copper = 0;
6110 cmd->supported = SUPPORTED_Autoneg;
6111 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6112 support_serdes = 1;
6113 support_copper = 1;
6114 } else if (bp->phy_port == PORT_FIBRE)
6115 support_serdes = 1;
6116 else
6117 support_copper = 1;
6119 if (support_serdes) {
6120 cmd->supported |= SUPPORTED_1000baseT_Full |
6121 SUPPORTED_FIBRE;
6122 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6123 cmd->supported |= SUPPORTED_2500baseX_Full;
6126 if (support_copper) {
6127 cmd->supported |= SUPPORTED_10baseT_Half |
6128 SUPPORTED_10baseT_Full |
6129 SUPPORTED_100baseT_Half |
6130 SUPPORTED_100baseT_Full |
6131 SUPPORTED_1000baseT_Full |
6132 SUPPORTED_TP;
6136 spin_lock_bh(&bp->phy_lock);
6137 cmd->port = bp->phy_port;
6138 cmd->advertising = bp->advertising;
6140 if (bp->autoneg & AUTONEG_SPEED) {
6141 cmd->autoneg = AUTONEG_ENABLE;
6144 cmd->autoneg = AUTONEG_DISABLE;
6147 if (netif_carrier_ok(dev)) {
6148 cmd->speed = bp->line_speed;
6149 cmd->duplex = bp->duplex;
6155 spin_unlock_bh(&bp->phy_lock);
6157 cmd->transceiver = XCVR_INTERNAL;
6158 cmd->phy_address = bp->phy_addr;
6160 return 0;
6164 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6166 struct bnx2 *bp = netdev_priv(dev);
6167 u8 autoneg = bp->autoneg;
6168 u8 req_duplex = bp->req_duplex;
6169 u16 req_line_speed = bp->req_line_speed;
6170 u32 advertising = bp->advertising;
6171 int err = -EINVAL;
6173 spin_lock_bh(&bp->phy_lock);
6175 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6176 goto err_out_unlock;
6178 if (cmd->port != bp->phy_port &&
6179 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6180 goto err_out_unlock;
6182 if (cmd->autoneg == AUTONEG_ENABLE) {
6183 autoneg |= AUTONEG_SPEED;
6185 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6187 /* allow advertising 1 speed */
6188 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6189 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6190 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6191 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6193 if (cmd->port == PORT_FIBRE)
6194 goto err_out_unlock;
6196 advertising = cmd->advertising;
6198 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6199 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6200 (cmd->port == PORT_TP))
6201 goto err_out_unlock;
6202 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6203 advertising = cmd->advertising;
6204 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6205 goto err_out_unlock;
6207 if (cmd->port == PORT_FIBRE)
6208 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6210 advertising = ETHTOOL_ALL_COPPER_SPEED;
6212 advertising |= ADVERTISED_Autoneg;
6215 if (cmd->port == PORT_FIBRE) {
6216 if ((cmd->speed != SPEED_1000 &&
6217 cmd->speed != SPEED_2500) ||
6218 (cmd->duplex != DUPLEX_FULL))
6219 goto err_out_unlock;
6221 if (cmd->speed == SPEED_2500 &&
6222 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6223 goto err_out_unlock;
6225 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6226 goto err_out_unlock;
6228 autoneg &= ~AUTONEG_SPEED;
6229 req_line_speed = cmd->speed;
6230 req_duplex = cmd->duplex;
6234 bp->autoneg = autoneg;
6235 bp->advertising = advertising;
6236 bp->req_line_speed = req_line_speed;
6237 bp->req_duplex = req_duplex;
6239 err = bnx2_setup_phy(bp, cmd->port);
6241 err_out_unlock:
6242 spin_unlock_bh(&bp->phy_lock);
6244 return err;
6248 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6250 struct bnx2 *bp = netdev_priv(dev);
6252 strcpy(info->driver, DRV_MODULE_NAME);
6253 strcpy(info->version, DRV_MODULE_VERSION);
6254 strcpy(info->bus_info, pci_name(bp->pdev));
6255 strcpy(info->fw_version, bp->fw_version);
6258 #define BNX2_REGDUMP_LEN (32 * 1024)
6261 bnx2_get_regs_len(struct net_device *dev)
6263 return BNX2_REGDUMP_LEN;
6267 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6269 u32 *p = _p, i, offset;
6270 u8 *orig_p = _p;
6271 struct bnx2 *bp = netdev_priv(dev);
6272 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6273 0x0800, 0x0880, 0x0c00, 0x0c10,
6274 0x0c30, 0x0d08, 0x1000, 0x101c,
6275 0x1040, 0x1048, 0x1080, 0x10a4,
6276 0x1400, 0x1490, 0x1498, 0x14f0,
6277 0x1500, 0x155c, 0x1580, 0x15dc,
6278 0x1600, 0x1658, 0x1680, 0x16d8,
6279 0x1800, 0x1820, 0x1840, 0x1854,
6280 0x1880, 0x1894, 0x1900, 0x1984,
6281 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6282 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6283 0x2000, 0x2030, 0x23c0, 0x2400,
6284 0x2800, 0x2820, 0x2830, 0x2850,
6285 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6286 0x3c00, 0x3c94, 0x4000, 0x4010,
6287 0x4080, 0x4090, 0x43c0, 0x4458,
6288 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6289 0x4fc0, 0x5010, 0x53c0, 0x5444,
6290 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6291 0x5fc0, 0x6000, 0x6400, 0x6428,
6292 0x6800, 0x6848, 0x684c, 0x6860,
6293 0x6888, 0x6910, 0x8000 };
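/* reg_boundaries lists alternating end/start offsets of readable
 * register ranges; the dump loop below skips the holes, leaving them
 * zeroed in the output buffer.
 */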
6297 memset(p, 0, BNX2_REGDUMP_LEN);
6299 if (!netif_running(bp->dev))
6300 return;
6302 i = 0;
6303 offset = reg_boundaries[0];
6305 while (offset < BNX2_REGDUMP_LEN) {
6306 *p++ = REG_RD(bp, offset);
6307 offset += 4;
6308 if (offset == reg_boundaries[i + 1]) {
6309 offset = reg_boundaries[i + 2];
6310 p = (u32 *) (orig_p + offset);
6311 i += 2;
6317 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6319 struct bnx2 *bp = netdev_priv(dev);
6321 if (bp->flags & BNX2_FLAG_NO_WOL) {
6322 wol->supported = 0;
6323 wol->wolopts = 0;
6324 }
6325 else {
6326 wol->supported = WAKE_MAGIC;
6327 if (bp->wol)
6328 wol->wolopts = WAKE_MAGIC;
6329 else
6330 wol->wolopts = 0;
6331 }
6332 memset(&wol->sopass, 0, sizeof(wol->sopass));
6336 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6338 struct bnx2 *bp = netdev_priv(dev);
6340 if (wol->wolopts & ~WAKE_MAGIC)
6341 return -EINVAL;
6343 if (wol->wolopts & WAKE_MAGIC) {
6344 if (bp->flags & BNX2_FLAG_NO_WOL)
6345 return -EINVAL;
6347 bp->wol = 1;
6348 }
6349 else
6350 bp->wol = 0;
6352 return 0;
6356 bnx2_nway_reset(struct net_device *dev)
6358 struct bnx2 *bp = netdev_priv(dev);
6359 u32 bmcr;
6361 if (!(bp->autoneg & AUTONEG_SPEED)) {
6362 return -EINVAL;
6365 spin_lock_bh(&bp->phy_lock);
6367 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6370 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6371 spin_unlock_bh(&bp->phy_lock);
6372 return rc;
6375 /* Force a link down visible on the other side */
6376 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6377 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6378 spin_unlock_bh(&bp->phy_lock);
6382 spin_lock_bh(&bp->phy_lock);
6384 bp->current_interval = SERDES_AN_TIMEOUT;
6385 bp->serdes_an_pending = 1;
6386 mod_timer(&bp->timer, jiffies + bp->current_interval);
6389 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6390 bmcr &= ~BMCR_LOOPBACK;
6391 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6393 spin_unlock_bh(&bp->phy_lock);
6395 return 0;
6399 bnx2_get_eeprom_len(struct net_device *dev)
6401 struct bnx2 *bp = netdev_priv(dev);
6403 if (bp->flash_info == NULL)
6404 return 0;
6406 return (int) bp->flash_size;
6410 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6411 u8 *eebuf)
6413 struct bnx2 *bp = netdev_priv(dev);
6416 /* parameters already validated in ethtool_get_eeprom */
6418 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6420 return rc;
6424 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6425 u8 *eebuf)
6427 struct bnx2 *bp = netdev_priv(dev);
6430 /* parameters already validated in ethtool_set_eeprom */
6432 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6434 return rc;
6438 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6440 struct bnx2 *bp = netdev_priv(dev);
6442 memset(coal, 0, sizeof(struct ethtool_coalesce));
6444 coal->rx_coalesce_usecs = bp->rx_ticks;
6445 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6446 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6447 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6449 coal->tx_coalesce_usecs = bp->tx_ticks;
6450 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6451 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6452 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6454 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6456 return 0;
6460 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6462 struct bnx2 *bp = netdev_priv(dev);
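/* The requested values are clamped to the hardware fields below:
 * tick counts are 10 bits wide (0x3ff) and frame counts 8 bits
 * (0xff).
 */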
6464 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6465 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6467 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6468 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6470 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6471 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6473 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6474 if (bp->rx_quick_cons_trip_int > 0xff)
6475 bp->rx_quick_cons_trip_int = 0xff;
6477 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6478 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6480 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6481 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6483 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6484 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6486 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6487 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6488 0xff;
6490 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6491 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6492 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6493 bp->stats_ticks = USEC_PER_SEC;
6494 } else {
6495 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6496 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6497 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6499 if (netif_running(bp->dev)) {
6500 bnx2_netif_stop(bp);
6501 bnx2_init_nic(bp, 0);
6502 bnx2_netif_start(bp);
6505 return 0;
6509 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6511 struct bnx2 *bp = netdev_priv(dev);
6513 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6514 ering->rx_mini_max_pending = 0;
6515 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6517 ering->rx_pending = bp->rx_ring_size;
6518 ering->rx_mini_pending = 0;
6519 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6521 ering->tx_max_pending = MAX_TX_DESC_CNT;
6522 ering->tx_pending = bp->tx_ring_size;
6526 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6528 if (netif_running(bp->dev)) {
6529 bnx2_netif_stop(bp);
6530 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6531 bnx2_free_skbs(bp);
6532 bnx2_free_mem(bp);
6535 bnx2_set_rx_ring_size(bp, rx);
6536 bp->tx_ring_size = tx;
6538 if (netif_running(bp->dev)) {
6541 rc = bnx2_alloc_mem(bp);
6542 if (rc)
6543 return rc;
6544 bnx2_init_nic(bp, 0);
6545 bnx2_netif_start(bp);
6547 return 0;
6551 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6553 struct bnx2 *bp = netdev_priv(dev);
6556 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6557 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6558 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6560 return -EINVAL;
6561 }
6562 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6563 return rc;
6567 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6569 struct bnx2 *bp = netdev_priv(dev);
6571 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6572 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6573 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6577 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6579 struct bnx2 *bp = netdev_priv(dev);
6581 bp->req_flow_ctrl = 0;
6582 if (epause->rx_pause)
6583 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6584 if (epause->tx_pause)
6585 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6587 if (epause->autoneg) {
6588 bp->autoneg |= AUTONEG_FLOW_CTRL;
6591 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6594 spin_lock_bh(&bp->phy_lock);
6596 bnx2_setup_phy(bp, bp->phy_port);
6598 spin_unlock_bh(&bp->phy_lock);
6600 return 0;
6604 bnx2_get_rx_csum(struct net_device *dev)
6606 struct bnx2 *bp = netdev_priv(dev);
6608 return bp->rx_csum;
6612 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6614 struct bnx2 *bp = netdev_priv(dev);
6616 bp->rx_csum = data;
6617 return 0;
6621 bnx2_set_tso(struct net_device *dev, u32 data)
6623 struct bnx2 *bp = netdev_priv(dev);
6625 if (data) {
6626 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6627 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6628 dev->features |= NETIF_F_TSO6;
6629 } else
6630 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6631 NETIF_F_TSO_ECN);
6633 return 0;
6635 #define BNX2_NUM_STATS 46
6638 char string[ETH_GSTRING_LEN];
6639 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6640 { "rx_bytes" },
6641 { "rx_error_bytes" },
6642 { "tx_bytes" },
6643 { "tx_error_bytes" },
6644 { "rx_ucast_packets" },
6645 { "rx_mcast_packets" },
6646 { "rx_bcast_packets" },
6647 { "tx_ucast_packets" },
6648 { "tx_mcast_packets" },
6649 { "tx_bcast_packets" },
6650 { "tx_mac_errors" },
6651 { "tx_carrier_errors" },
6652 { "rx_crc_errors" },
6653 { "rx_align_errors" },
6654 { "tx_single_collisions" },
6655 { "tx_multi_collisions" },
6656 { "tx_deferred" },
6657 { "tx_excess_collisions" },
6658 { "tx_late_collisions" },
6659 { "tx_total_collisions" },
6660 { "rx_fragments" },
6661 { "rx_jabbers" },
6662 { "rx_undersize_packets" },
6663 { "rx_oversize_packets" },
6664 { "rx_64_byte_packets" },
6665 { "rx_65_to_127_byte_packets" },
6666 { "rx_128_to_255_byte_packets" },
6667 { "rx_256_to_511_byte_packets" },
6668 { "rx_512_to_1023_byte_packets" },
6669 { "rx_1024_to_1522_byte_packets" },
6670 { "rx_1523_to_9022_byte_packets" },
6671 { "tx_64_byte_packets" },
6672 { "tx_65_to_127_byte_packets" },
6673 { "tx_128_to_255_byte_packets" },
6674 { "tx_256_to_511_byte_packets" },
6675 { "tx_512_to_1023_byte_packets" },
6676 { "tx_1024_to_1522_byte_packets" },
6677 { "tx_1523_to_9022_byte_packets" },
6678 { "rx_xon_frames" },
6679 { "rx_xoff_frames" },
6680 { "tx_xon_frames" },
6681 { "tx_xoff_frames" },
6682 { "rx_mac_ctrl_frames" },
6683 { "rx_filtered_packets" },
6684 { "rx_discards" },
6685 { "rx_fw_discards" },
6688 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
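/* The statistics block is addressed as an array of 32-bit words, so
 * each offset is a byte offset divided by 4; 64-bit counters are
 * read as a _hi word followed by its _lo word (see
 * bnx2_get_ethtool_stats below).
 */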
6690 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6691 STATS_OFFSET32(stat_IfHCInOctets_hi),
6692 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6693 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6694 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6695 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6696 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6697 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6698 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6699 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6700 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6701 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6702 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6703 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6704 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6705 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6706 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6707 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6708 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6709 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6710 STATS_OFFSET32(stat_EtherStatsCollisions),
6711 STATS_OFFSET32(stat_EtherStatsFragments),
6712 STATS_OFFSET32(stat_EtherStatsJabbers),
6713 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6714 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6715 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6716 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6717 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6718 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6719 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6720 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6721 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6722 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6723 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6724 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6725 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6726 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6727 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6728 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6729 STATS_OFFSET32(stat_XonPauseFramesReceived),
6730 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6731 STATS_OFFSET32(stat_OutXonSent),
6732 STATS_OFFSET32(stat_OutXoffSent),
6733 STATS_OFFSET32(stat_MacControlFramesReceived),
6734 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6735 STATS_OFFSET32(stat_IfInMBUFDiscards),
6736 STATS_OFFSET32(stat_FwRxDrop),
6739 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6740 * skipped because of errata.
6742 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6743 8,0,8,8,8,8,8,8,8,8,
6744 4,0,4,4,4,4,4,4,4,4,
6745 4,4,4,4,4,4,4,4,4,4,
6746 4,4,4,4,4,4,4,4,4,4,
6750 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6751 8,0,8,8,8,8,8,8,8,8,
6752 4,4,4,4,4,4,4,4,4,4,
6753 4,4,4,4,4,4,4,4,4,4,
6754 4,4,4,4,4,4,4,4,4,4,
6758 #define BNX2_NUM_TESTS 6
6761 char string[ETH_GSTRING_LEN];
6762 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6763 { "register_test (offline)" },
6764 { "memory_test (offline)" },
6765 { "loopback_test (offline)" },
6766 { "nvram_test (online)" },
6767 { "interrupt_test (online)" },
6768 { "link_test (online)" },
6772 bnx2_get_sset_count(struct net_device *dev, int sset)
6774 switch (sset) {
6775 case ETH_SS_TEST:
6776 return BNX2_NUM_TESTS;
6777 case ETH_SS_STATS:
6778 return BNX2_NUM_STATS;
6779 default:
6780 return -EOPNOTSUPP;
6781 }
6785 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6787 struct bnx2 *bp = netdev_priv(dev);
6789 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6790 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6793 bnx2_netif_stop(bp);
6794 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6797 if (bnx2_test_registers(bp) != 0) {
6798 buf[0] = 1;
6799 etest->flags |= ETH_TEST_FL_FAILED;
6801 if (bnx2_test_memory(bp) != 0) {
6802 buf[1] = 1;
6803 etest->flags |= ETH_TEST_FL_FAILED;
6805 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6806 etest->flags |= ETH_TEST_FL_FAILED;
6808 if (!netif_running(bp->dev)) {
6809 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6810 }
6811 else {
6812 bnx2_init_nic(bp, 1);
6813 bnx2_netif_start(bp);
6816 /* wait for link up */
6817 for (i = 0; i < 7; i++) {
6818 if (bp->link_up)
6819 break;
6820 msleep_interruptible(1000);
6824 if (bnx2_test_nvram(bp) != 0) {
6825 buf[3] = 1;
6826 etest->flags |= ETH_TEST_FL_FAILED;
6828 if (bnx2_test_intr(bp) != 0) {
6829 buf[4] = 1;
6830 etest->flags |= ETH_TEST_FL_FAILED;
6833 if (bnx2_test_link(bp) != 0) {
6834 buf[5] = 1;
6835 etest->flags |= ETH_TEST_FL_FAILED;
6841 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6843 switch (stringset) {
6844 case ETH_SS_STATS:
6845 memcpy(buf, bnx2_stats_str_arr,
6846 sizeof(bnx2_stats_str_arr));
6847 break;
6848 case ETH_SS_TEST:
6849 memcpy(buf, bnx2_tests_str_arr,
6850 sizeof(bnx2_tests_str_arr));
6856 bnx2_get_ethtool_stats(struct net_device *dev,
6857 struct ethtool_stats *stats, u64 *buf)
6859 struct bnx2 *bp = netdev_priv(dev);
6860 int i;
6861 u32 *hw_stats = (u32 *) bp->stats_blk;
6862 u8 *stats_len_arr = NULL;
6864 if (hw_stats == NULL) {
6865 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6866 return;
6869 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6870 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6871 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6872 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6873 stats_len_arr = bnx2_5706_stats_len_arr;
6875 stats_len_arr = bnx2_5708_stats_len_arr;
6877 for (i = 0; i < BNX2_NUM_STATS; i++) {
6878 if (stats_len_arr[i] == 0) {
6879 /* skip this counter */
6880 buf[i] = 0;
6881 continue;
6883 if (stats_len_arr[i] == 4) {
6884 /* 4-byte counter */
6885 buf[i] = (u64)
6886 *(hw_stats + bnx2_stats_offset_arr[i]);
6889 /* 8-byte counter */
6890 buf[i] = (((u64) *(hw_stats +
6891 bnx2_stats_offset_arr[i])) << 32) +
6892 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6897 bnx2_phys_id(struct net_device *dev, u32 data)
6899 struct bnx2 *bp = netdev_priv(dev);
6900 int i;
6901 u32 save;
6903 if (data == 0)
6904 data = 2;
6906 save = REG_RD(bp, BNX2_MISC_CFG);
6907 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6909 for (i = 0; i < (data * 2); i++) {
6910 if ((i % 2) == 0) {
6911 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6912 }
6913 else {
6914 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6915 BNX2_EMAC_LED_1000MB_OVERRIDE |
6916 BNX2_EMAC_LED_100MB_OVERRIDE |
6917 BNX2_EMAC_LED_10MB_OVERRIDE |
6918 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6919 BNX2_EMAC_LED_TRAFFIC);
6921 msleep_interruptible(500);
6922 if (signal_pending(current))
6923 break;
6925 REG_WR(bp, BNX2_EMAC_LED, 0);
6926 REG_WR(bp, BNX2_MISC_CFG, save);
6928 return 0;
6931 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6933 struct bnx2 *bp = netdev_priv(dev);
6935 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6936 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6938 return (ethtool_op_set_tx_csum(dev, data));
6941 static const struct ethtool_ops bnx2_ethtool_ops = {
6942 .get_settings = bnx2_get_settings,
6943 .set_settings = bnx2_set_settings,
6944 .get_drvinfo = bnx2_get_drvinfo,
6945 .get_regs_len = bnx2_get_regs_len,
6946 .get_regs = bnx2_get_regs,
6947 .get_wol = bnx2_get_wol,
6948 .set_wol = bnx2_set_wol,
6949 .nway_reset = bnx2_nway_reset,
6950 .get_link = ethtool_op_get_link,
6951 .get_eeprom_len = bnx2_get_eeprom_len,
6952 .get_eeprom = bnx2_get_eeprom,
6953 .set_eeprom = bnx2_set_eeprom,
6954 .get_coalesce = bnx2_get_coalesce,
6955 .set_coalesce = bnx2_set_coalesce,
6956 .get_ringparam = bnx2_get_ringparam,
6957 .set_ringparam = bnx2_set_ringparam,
6958 .get_pauseparam = bnx2_get_pauseparam,
6959 .set_pauseparam = bnx2_set_pauseparam,
6960 .get_rx_csum = bnx2_get_rx_csum,
6961 .set_rx_csum = bnx2_set_rx_csum,
6962 .set_tx_csum = bnx2_set_tx_csum,
6963 .set_sg = ethtool_op_set_sg,
6964 .set_tso = bnx2_set_tso,
6965 .self_test = bnx2_self_test,
6966 .get_strings = bnx2_get_strings,
6967 .phys_id = bnx2_phys_id,
6968 .get_ethtool_stats = bnx2_get_ethtool_stats,
6969 .get_sset_count = bnx2_get_sset_count,
6972 /* Called with rtnl_lock */
6974 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6976 struct mii_ioctl_data *data = if_mii(ifr);
6977 struct bnx2 *bp = netdev_priv(dev);
6978 int err;
6980 switch (cmd) {
6981 case SIOCGMIIPHY:
6982 data->phy_id = bp->phy_addr;
6984 /* fallthru */
6985 case SIOCGMIIREG: {
6986 u32 mii_regval;
6988 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6989 return -EOPNOTSUPP;
6991 if (!netif_running(dev))
6992 return -EAGAIN;
6994 spin_lock_bh(&bp->phy_lock);
6995 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6996 spin_unlock_bh(&bp->phy_lock);
6998 data->val_out = mii_regval;
7000 return err;
7001 }
7003 case SIOCSMIIREG:
7004 if (!capable(CAP_NET_ADMIN))
7005 return -EPERM;
7007 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7008 return -EOPNOTSUPP;
7010 if (!netif_running(dev))
7011 return -EAGAIN;
7013 spin_lock_bh(&bp->phy_lock);
7014 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7015 spin_unlock_bh(&bp->phy_lock);
7017 return err;
7019 default:
7020 break;
7022 }
7023 return -EOPNOTSUPP;
7026 /* Called with rtnl_lock */
7028 bnx2_change_mac_addr(struct net_device *dev, void *p)
7030 struct sockaddr *addr = p;
7031 struct bnx2 *bp = netdev_priv(dev);
7033 if (!is_valid_ether_addr(addr->sa_data))
7034 return -EINVAL;
7036 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7037 if (netif_running(dev))
7038 bnx2_set_mac_addr(bp);
7040 return 0;
7043 /* Called with rtnl_lock */
7045 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7047 struct bnx2 *bp = netdev_priv(dev);
7049 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7050 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7051 return -EINVAL;
7053 dev->mtu = new_mtu;
7054 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7057 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7059 poll_bnx2(struct net_device *dev)
7061 struct bnx2 *bp = netdev_priv(dev);
7063 disable_irq(bp->pdev->irq);
7064 bnx2_interrupt(bp->pdev->irq, dev);
7065 enable_irq(bp->pdev->irq);
7069 static void __devinit
7070 bnx2_get_5709_media(struct bnx2 *bp)
7072 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7073 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7076 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7077 return;
7078 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7079 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7080 return;
7083 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7084 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7085 else
7086 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7088 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7089 switch (strap) {
7090 case 0x4:
7091 case 0x5:
7092 case 0x6:
7093 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7094 return;
7095 }
7096 } else {
7097 switch (strap) {
7098 case 0x1:
7099 case 0x2:
7100 case 0x4:
7101 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7107 static void __devinit
7108 bnx2_get_pci_speed(struct bnx2 *bp)
7112 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7113 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7114 u32 clkreg;
7116 bp->flags |= BNX2_FLAG_PCIX;
7118 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7120 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7121 switch (clkreg) {
7122 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7123 bp->bus_speed_mhz = 133;
7124 break;
7126 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7127 bp->bus_speed_mhz = 100;
7128 break;
7130 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7131 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7132 bp->bus_speed_mhz = 66;
7133 break;
7135 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7136 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7137 bp->bus_speed_mhz = 50;
7138 break;
7140 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7141 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7142 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7143 bp->bus_speed_mhz = 33;
7144 break;
7145 }
7146 }
7147 else {
7148 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7149 bp->bus_speed_mhz = 66;
7150 else
7151 bp->bus_speed_mhz = 33;
7152 }
7154 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7155 bp->flags |= BNX2_FLAG_PCI_32BIT;
7159 static int __devinit
7160 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7163 unsigned long mem_len;
7166 u64 dma_mask, persist_dma_mask;
7168 SET_NETDEV_DEV(dev, &pdev->dev);
7169 bp = netdev_priv(dev);
7174 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7175 rc = pci_enable_device(pdev);
7177 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7181 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7182 dev_err(&pdev->dev,
7183 "Cannot find PCI device base address, aborting.\n");
7184 rc = -ENODEV;
7185 goto err_out_disable;
7188 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7190 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7191 goto err_out_disable;
7194 pci_set_master(pdev);
7195 pci_save_state(pdev);
7197 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7198 if (bp->pm_cap == 0) {
7199 dev_err(&pdev->dev,
7200 "Cannot find power management capability, aborting.\n");
7201 rc = -EIO;
7202 goto err_out_release;
7208 spin_lock_init(&bp->phy_lock);
7209 spin_lock_init(&bp->indirect_lock);
7210 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7212 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7213 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7214 dev->mem_end = dev->mem_start + mem_len;
7215 dev->irq = pdev->irq;
7217 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7219 if (!bp->regview) {
7220 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7221 rc = -ENOMEM;
7222 goto err_out_release;
7225 /* Configure byte swap and enable write to the reg_window registers.
7226 * Rely on CPU to do target byte swapping on big endian systems
7227 * The chip's target access swapping will not swap all accesses
7229 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7230 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7231 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7233 bnx2_set_power_state(bp, PCI_D0);
7235 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7237 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7238 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7239 dev_err(&pdev->dev,
7240 "Cannot find PCIE capability, aborting.\n");
7241 rc = -EIO;
7242 goto err_out_unmap;
7243 }
7244 bp->flags |= BNX2_FLAG_PCIE;
7245 if (CHIP_REV(bp) == CHIP_REV_Ax)
7246 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7247 } else {
7248 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7249 if (bp->pcix_cap == 0) {
7250 dev_err(&pdev->dev,
7251 "Cannot find PCIX capability, aborting.\n");
7252 rc = -EIO;
7253 goto err_out_unmap;
7257 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7258 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7259 bp->flags |= BNX2_FLAG_MSIX_CAP;
7262 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7263 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7264 bp->flags |= BNX2_FLAG_MSI_CAP;
7267 /* 5708 cannot support DMA addresses > 40-bit. */
7268 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7269 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7271 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7273 /* Configure DMA attributes. */
7274 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7275 dev->features |= NETIF_F_HIGHDMA;
7276 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7277 if (rc) {
7278 dev_err(&pdev->dev,
7279 "pci_set_consistent_dma_mask failed, aborting.\n");
7280 goto err_out_unmap;
7281 }
7282 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7283 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7284 goto err_out_unmap;
7287 if (!(bp->flags & BNX2_FLAG_PCIE))
7288 bnx2_get_pci_speed(bp);
7290 /* 5706A0 may falsely detect SERR and PERR. */
7291 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7292 reg = REG_RD(bp, PCI_COMMAND);
7293 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7294 REG_WR(bp, PCI_COMMAND, reg);
7296 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7297 !(bp->flags & BNX2_FLAG_PCIX)) {
7299 dev_err(&pdev->dev,
7300 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7301 goto err_out_unmap;
7304 bnx2_init_nvram(bp);
7306 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7308 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7309 BNX2_SHM_HDR_SIGNATURE_SIG) {
7310 u32 off = PCI_FUNC(pdev->devfn) << 2;
7312 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7314 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7316 /* Get the permanent MAC address. First we need to make sure the
7317 * firmware is actually running.
7319 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7321 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7322 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7323 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7325 rc = -ENODEV;
7326 goto err_out_unmap;
7328 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
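/* The bootcode revision packs one decimal field per byte in the
 * upper three bytes of this word; the loop below renders it as
 * "x.y.z", suppressing leading zeros within each field.
 */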
7329 for (i = 0, j = 0; i < 3; i++) {
7330 u8 num, k, skip0;
7332 num = (u8) (reg >> (24 - (i * 8)));
7333 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7334 if (num >= k || !skip0 || k == 1) {
7335 bp->fw_version[j++] = (num / k) + '0';
7336 skip0 = 0;
7339 if (i != 2)
7340 bp->fw_version[j++] = '.';
7342 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7343 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7344 bp->wol = 1;
7346 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7347 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7349 for (i = 0; i < 30; i++) {
7350 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7351 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7352 break;
7353 msleep(10);
7356 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7357 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7358 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7359 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7361 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7363 bp->fw_version[j++] = ' ';
7364 for (i = 0; i < 3; i++) {
7365 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7366 reg = swab32(reg);
7367 memcpy(&bp->fw_version[j], &reg, 4);
7372 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7373 bp->mac_addr[0] = (u8) (reg >> 8);
7374 bp->mac_addr[1] = (u8) reg;
7376 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7377 bp->mac_addr[2] = (u8) (reg >> 24);
7378 bp->mac_addr[3] = (u8) (reg >> 16);
7379 bp->mac_addr[4] = (u8) (reg >> 8);
7380 bp->mac_addr[5] = (u8) reg;
7382 bp->tx_ring_size = MAX_TX_DESC_CNT;
7383 bnx2_set_rx_ring_size(bp, 255);
7387 bp->tx_quick_cons_trip_int = 20;
7388 bp->tx_quick_cons_trip = 20;
7389 bp->tx_ticks_int = 80;
7390 bp->tx_ticks = 80;
7392 bp->rx_quick_cons_trip_int = 6;
7393 bp->rx_quick_cons_trip = 6;
7394 bp->rx_ticks_int = 18;
7395 bp->rx_ticks = 18;
7397 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7399 bp->timer_interval = HZ;
7400 bp->current_interval = HZ;
7404 /* Disable WOL support if we are running on a SERDES chip. */
7405 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7406 bnx2_get_5709_media(bp);
7407 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7408 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7410 bp->phy_port = PORT_TP;
7411 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7412 bp->phy_port = PORT_FIBRE;
7413 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7414 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7415 bp->flags |= BNX2_FLAG_NO_WOL;
7418 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7419 /* Don't do parallel detect on this board because of
7420 * some board problems. The link will not go down
7421 * if we do parallel detect.
7423 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7424 pdev->subsystem_device == 0x310c)
7425 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7428 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7429 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7431 bnx2_init_remote_phy(bp);
7433 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7434 CHIP_NUM(bp) == CHIP_NUM_5708)
7435 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7436 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7437 (CHIP_REV(bp) == CHIP_REV_Ax ||
7438 CHIP_REV(bp) == CHIP_REV_Bx))
7439 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}
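	/* The 5706 A0 stepping apparently cannot use separate in-interrupt
	 * coalescing parameters, so mirror the normal values into the
	 * _int fields.
	 */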
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
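/* Build a human-readable description of the bus the device sits on,
 * e.g. "PCI-X 64-bit 133MHz" or "PCI Express"; str must be large
 * enough for the longest variant.
 */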
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
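/* Register one NAPI context per possible MSI-X vector.  Vector 0
 * services both RX and TX (bnx2_poll); the additional vectors are
 * TX-only (bnx2_tx_poll).
 */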
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_tx_poll;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}
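/* PCI probe entry point: allocate the net_device, initialize the board,
 * wire up the net_device ops and feature flags, then register with the
 * networking core.
 */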
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;
	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}
	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
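/* PCI error recovery (AER/EEH) callbacks.  The core calls
 * error_detected first, then slot_reset after the link is reset, and
 * finally resume once traffic may flow again; all three run under the
 * RTNL lock taken locally.
 */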
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
module_init(bnx2_init);
module_exit(bnx2_cleanup);