/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define FW_BUF_SIZE		0x10000

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.7.6"
#define DRV_MODULE_RELDATE	"May 16, 2008"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
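	/* Each entry below is, in order: the strapping value, the cfg1/cfg2/
	 * cfg3 command words, the write1 command word, flags, page bits,
	 * page size, byte address mask, total size, and a human-readable
	 * name (field names per struct flash_spec in bnx2.h; the per-device
	 * magic numbers come from the hardware straps and are not decoded
	 * here).
	 */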
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32
bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
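/* Worked example, assuming the usual 16-bit producer/consumer indices:
 * with tx_prod = 3 and tx_cons = 0xfffe, the subtraction above yields a
 * value far above TX_DESC_CNT because the indices have wrapped, so it is
 * masked back to 16 bits (giving 5 BDs in flight).  A completely full
 * page (diff == TX_DESC_CNT) is reported as MAX_TX_DESC_CNT since the
 * last index of each page is the next-page pointer, not a data BD.
 */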
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
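/* Note the pairing above: bnx2_disable_int_sync() bumps intr_sem, and
 * bnx2_netif_start() only re-enables NAPI and interrupts when the
 * matching atomic_dec_and_test() brings the count back to zero, so
 * nested stop/start sequences stay balanced.
 */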
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		if (rxr->rx_buf_ring)
			vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		/* Index with the page-ring loop counter j here, not the
		 * outer ring counter i, which freed the wrong entries.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		if (rxr->rx_pg_ring)
			vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
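	/* Resulting layout of the single DMA allocation made below:
	 *
	 *   [ status block(s): one BNX2_SBLK_MSIX_ALIGN_SIZE slot per
	 *     MSI-X vector when MSIX is possible, cache-line aligned ]
	 *   [ statistics block ]
	 */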
	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper"));
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
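/* Summary of the resolution above (PAUSE/ASM_DIR bits as advertised
 * locally and by the link partner, per 802.3ab Table 28B-3):
 *
 *   local PAUSE=1            remote PAUSE=1            -> TX + RX pause
 *   local PAUSE=1, ASM_DIR=1 remote PAUSE=0, ASM_DIR=1 -> RX pause only
 *   local PAUSE=0, ASM_DIR=1 remote PAUSE=1, ASM_DIR=1 -> TX pause only
 *   any other combination                              -> no pause
 */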
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}

	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
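		/* MII_STAT1000 reports the partner's 1000BASE-T abilities
		 * two bit positions to the left of where MII_CTRL1000
		 * advertises ours (LPA_1000FULL/HALF vs.
		 * ADVERTISE_1000FULL/HALF), hence the >> 2 before masking.
		 */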
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int bnx2_fw_sync(struct bnx2 *, u32, int);

static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
static void
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced
			 * or vice-versa.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return (bnx2_setup_serdes_phy(bp, port));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
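/* The driver/firmware mailbox handshake above: each request carries an
 * incrementing sequence number in BNX2_DRV_MB, and the firmware echoes
 * that sequence into BNX2_FW_MB once it has consumed the command, so a
 * missing echo after FW_ACK_TIME_OUT_MS means the firmware is stuck.
 */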
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {
			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
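/* The free command above appears to pack the 9-bit mbuf cluster index
 * into both halves of the register value ((index << 9) | index) with
 * bit 0 acting as a valid/free flag; the hardware documentation is not
 * public, so this reading of the encoding is inferred from the
 * allocation path, which masks the index with BNX2_RBUF_FW_BUF_ALLOC_VALUE.
 */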
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	/* MAC_MATCH0 holds the top two address bytes, MAC_MATCH1 the rest. */
	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
2463 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2466 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2467 struct rx_bd *rxbd =
2468 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2469 struct page *page = alloc_page(GFP_ATOMIC);
2473 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2474 PCI_DMA_FROMDEVICE);
2476 pci_unmap_addr_set(rx_pg, mapping, mapping);
2477 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2478 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2483 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2485 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2486 struct page *page = rx_pg->page;
2491 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2492 PCI_DMA_FROMDEVICE);
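/* Allocate and map one receive skb: align skb->data to
 * BNX2_RX_ALIGN, publish the 64-bit DMA address in the two halves
 * of the rx_bd, and advance rx_prod_bseq by the buffer size.
 */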
2499 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2501 struct sk_buff *skb;
2502 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2504 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2505 unsigned long align;
2507 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2512 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2513 skb_reserve(skb, BNX2_RX_ALIGN - align);
2515 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2516 PCI_DMA_FROMDEVICE);
2519 pci_unmap_addr_set(rx_buf, mapping, mapping);
2521 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2522 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2524 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2530 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2532 struct status_block *sblk = bnapi->status_blk.msi;
2533 u32 new_link_state, old_link_state;
2536 new_link_state = sblk->status_attn_bits & event;
2537 old_link_state = sblk->status_attn_bits_ack & event;
2538 if (new_link_state != old_link_state) {
2540 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2542 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2550 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2552 spin_lock(&bp->phy_lock);
2554 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2556 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2557 bnx2_set_remote_link(bp);
2559 spin_unlock(&bp->phy_lock);
2564 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2568 /* Tell compiler that status block fields can change. */
2569 barrier();
2570 cons = *bnapi->hw_tx_cons_ptr;
2571 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
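/* Reclaim completed TX buffers up to the hardware consumer index.
 * The index read above skips the last BD of each page (a chain
 * pointer, not a real descriptor). A TSO packet may complete in
 * partial-BD steps, hence the extra hw_cons check for GSO skbs
 * below.
 */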
2577 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2579 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2580 u16 hw_cons, sw_cons, sw_ring_cons;
2583 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2584 sw_cons = txr->tx_cons;
2586 while (sw_cons != hw_cons) {
2587 struct sw_bd *tx_buf;
2588 struct sk_buff *skb;
2591 sw_ring_cons = TX_RING_IDX(sw_cons);
2593 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2596 /* partial BD completions possible with TSO packets */
2597 if (skb_is_gso(skb)) {
2598 u16 last_idx, last_ring_idx;
2600 last_idx = sw_cons +
2601 skb_shinfo(skb)->nr_frags + 1;
2602 last_ring_idx = sw_ring_cons +
2603 skb_shinfo(skb)->nr_frags + 1;
2604 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2607 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2612 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2613 skb_headlen(skb), PCI_DMA_TODEVICE);
2616 last = skb_shinfo(skb)->nr_frags;
2618 for (i = 0; i < last; i++) {
2619 sw_cons = NEXT_TX_BD(sw_cons);
2621 pci_unmap_page(bp->pdev,
2623 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2625 skb_shinfo(skb)->frags[i].size,
2629 sw_cons = NEXT_TX_BD(sw_cons);
2633 if (tx_pkt == budget)
2636 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2639 txr->hw_tx_cons = hw_cons;
2640 txr->tx_cons = sw_cons;
2641 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2642 * before checking for netif_queue_stopped(). Without the
2643 * memory barrier, there is a small possibility that bnx2_start_xmit()
2644 * will miss it and cause the queue to be stopped forever.
2645 */
2647 smp_mb();
2648 if (unlikely(netif_queue_stopped(bp->dev)) &&
2649 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2650 netif_tx_lock(bp->dev);
2651 if ((netif_queue_stopped(bp->dev)) &&
2652 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2653 netif_wake_queue(bp->dev);
2654 netif_tx_unlock(bp->dev);
2660 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2661 struct sk_buff *skb, int count)
2663 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2664 struct rx_bd *cons_bd, *prod_bd;
2667 u16 hw_prod = rxr->rx_pg_prod, prod;
2668 u16 cons = rxr->rx_pg_cons;
2670 for (i = 0; i < count; i++) {
2671 prod = RX_PG_RING_IDX(hw_prod);
2673 prod_rx_pg = &rxr->rx_pg_ring[prod];
2674 cons_rx_pg = &rxr->rx_pg_ring[cons];
2675 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2676 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2678 if (i == 0 && skb) {
2680 struct skb_shared_info *shinfo;
2682 shinfo = skb_shinfo(skb);
2683 shinfo->nr_frags--;
2684 page = shinfo->frags[shinfo->nr_frags].page;
2685 shinfo->frags[shinfo->nr_frags].page = NULL;
2686 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2687 PCI_DMA_FROMDEVICE);
2688 cons_rx_pg->page = page;
2689 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2693 prod_rx_pg->page = cons_rx_pg->page;
2694 cons_rx_pg->page = NULL;
2695 pci_unmap_addr_set(prod_rx_pg, mapping,
2696 pci_unmap_addr(cons_rx_pg, mapping));
2698 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2699 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2702 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2703 hw_prod = NEXT_RX_BD(hw_prod);
2705 rxr->rx_pg_prod = hw_prod;
2706 rxr->rx_pg_cons = cons;
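/* Recycle an rx buffer that will not be passed up the stack:
 * re-sync the mapping for the device, move the skb and its DMA
 * mapping from the consumer entry to the producer entry, and copy
 * the BD address so the hardware reuses the same memory.
 */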
2710 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2711 struct sk_buff *skb, u16 cons, u16 prod)
2713 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2714 struct rx_bd *cons_bd, *prod_bd;
2716 cons_rx_buf = &rxr->rx_buf_ring[cons];
2717 prod_rx_buf = &rxr->rx_buf_ring[prod];
2719 pci_dma_sync_single_for_device(bp->pdev,
2720 pci_unmap_addr(cons_rx_buf, mapping),
2721 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2723 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2725 prod_rx_buf->skb = skb;
2730 pci_unmap_addr_set(prod_rx_buf, mapping,
2731 pci_unmap_addr(cons_rx_buf, mapping));
2733 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2734 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2735 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2736 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
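/* Finish building the skb for one received packet. hdr_len == 0
 * means the whole frame is in the linear buffer; otherwise the
 * first hdr_len bytes stay linear and the rest is attached as page
 * fragments from the rx page ring, with up to 4 bytes of trailing
 * CRC trimmed from the last fragment.
 */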
2740 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2741 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2745 u16 prod = ring_idx & 0xffff;
2747 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2748 if (unlikely(err)) {
2749 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2751 unsigned int raw_len = len + 4;
2752 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2754 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2759 skb_reserve(skb, BNX2_RX_OFFSET);
2760 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2761 PCI_DMA_FROMDEVICE);
2767 unsigned int i, frag_len, frag_size, pages;
2768 struct sw_pg *rx_pg;
2769 u16 pg_cons = rxr->rx_pg_cons;
2770 u16 pg_prod = rxr->rx_pg_prod;
2772 frag_size = len + 4 - hdr_len;
2773 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2774 skb_put(skb, hdr_len);
2776 for (i = 0; i < pages; i++) {
2777 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2778 if (unlikely(frag_len <= 4)) {
2779 unsigned int tail = 4 - frag_len;
2781 rxr->rx_pg_cons = pg_cons;
2782 rxr->rx_pg_prod = pg_prod;
2783 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2790 &skb_shinfo(skb)->frags[i - 1];
2792 skb->data_len -= tail;
2793 skb->truesize -= tail;
2797 rx_pg = &rxr->rx_pg_ring[pg_cons];
2799 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2800 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2805 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2808 err = bnx2_alloc_rx_page(bp, rxr,
2809 RX_PG_RING_IDX(pg_prod));
2810 if (unlikely(err)) {
2811 rxr->rx_pg_cons = pg_cons;
2812 rxr->rx_pg_prod = pg_prod;
2813 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2818 frag_size -= frag_len;
2819 skb->data_len += frag_len;
2820 skb->truesize += frag_len;
2821 skb->len += frag_len;
2823 pg_prod = NEXT_RX_BD(pg_prod);
2824 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2826 rxr->rx_pg_prod = pg_prod;
2827 rxr->rx_pg_cons = pg_cons;
2833 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2837 /* Tell compiler that status block fields can change. */
2838 barrier();
2839 cons = *bnapi->hw_rx_cons_ptr;
2840 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
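/* Main receive loop: walk the BDs from the software consumer index
 * to the hardware consumer index, validate each l2_fhdr, copy
 * small packets into fresh skbs (copybreak), and hand larger ones
 * to bnx2_rx_skb(). The ring registers are written once at the
 * end.
 */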
2846 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2848 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2849 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2850 struct l2_fhdr *rx_hdr;
2851 int rx_pkt = 0, pg_ring_used = 0;
2853 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2854 sw_cons = rxr->rx_cons;
2855 sw_prod = rxr->rx_prod;
2857 /* Memory barrier necessary as speculative reads of the rx
2858 * buffer can be ahead of the index in the status block
2859 */
2860 rmb();
2861 while (sw_cons != hw_cons) {
2862 unsigned int len, hdr_len;
2864 struct sw_bd *rx_buf;
2865 struct sk_buff *skb;
2866 dma_addr_t dma_addr;
2868 sw_ring_cons = RX_RING_IDX(sw_cons);
2869 sw_ring_prod = RX_RING_IDX(sw_prod);
2871 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2876 dma_addr = pci_unmap_addr(rx_buf, mapping);
2878 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2879 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2880 PCI_DMA_FROMDEVICE);
2882 rx_hdr = (struct l2_fhdr *) skb->data;
2883 len = rx_hdr->l2_fhdr_pkt_len - 4;
2885 if ((status = rx_hdr->l2_fhdr_status) &
2886 (L2_FHDR_ERRORS_BAD_CRC |
2887 L2_FHDR_ERRORS_PHY_DECODE |
2888 L2_FHDR_ERRORS_ALIGNMENT |
2889 L2_FHDR_ERRORS_TOO_SHORT |
2890 L2_FHDR_ERRORS_GIANT_FRAME)) {
2892 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2897 if (status & L2_FHDR_STATUS_SPLIT) {
2898 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2900 } else if (len > bp->rx_jumbo_thresh) {
2901 hdr_len = bp->rx_jumbo_thresh;
2907 if (len <= bp->rx_copy_thresh) {
2908 struct sk_buff *new_skb;
2910 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2911 if (new_skb == NULL) {
2912 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2918 skb_copy_from_linear_data_offset(skb,
2919 BNX2_RX_OFFSET - 2,
2920 new_skb->data, len + 2);
2921 skb_reserve(new_skb, 2);
2922 skb_put(new_skb, len);
2924 bnx2_reuse_rx_skb(bp, rxr, skb,
2925 sw_ring_cons, sw_ring_prod);
2928 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2929 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2932 skb->protocol = eth_type_trans(skb, bp->dev);
2934 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2935 (ntohs(skb->protocol) != 0x8100)) {
2942 skb->ip_summed = CHECKSUM_NONE;
2944 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2945 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2947 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2948 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2949 skb->ip_summed = CHECKSUM_UNNECESSARY;
2953 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2954 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2955 rx_hdr->l2_fhdr_vlan_tag);
2959 netif_receive_skb(skb);
2961 bp->dev->last_rx = jiffies;
2965 sw_cons = NEXT_RX_BD(sw_cons);
2966 sw_prod = NEXT_RX_BD(sw_prod);
2968 if (rx_pkt == budget)
2971 /* Refresh hw_cons to see if there is new work */
2972 if (sw_cons == hw_cons) {
2973 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2977 rxr->rx_cons = sw_cons;
2978 rxr->rx_prod = sw_prod;
2981 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
2983 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
2985 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
2993 /* MSI ISR - The only difference between this and the INTx ISR
2994 * is that the MSI interrupt is always serviced.
2995 */
2997 bnx2_msi(int irq, void *dev_instance)
2999 struct bnx2_napi *bnapi = dev_instance;
3000 struct bnx2 *bp = bnapi->bp;
3001 struct net_device *dev = bp->dev;
3003 prefetch(bnapi->status_blk.msi);
3004 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3005 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3006 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3008 /* Return here if interrupt is disabled. */
3009 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3012 netif_rx_schedule(dev, &bnapi->napi);
3018 bnx2_msi_1shot(int irq, void *dev_instance)
3020 struct bnx2_napi *bnapi = dev_instance;
3021 struct bnx2 *bp = bnapi->bp;
3022 struct net_device *dev = bp->dev;
3024 prefetch(bnapi->status_blk.msi);
3026 /* Return here if interrupt is disabled. */
3027 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3030 netif_rx_schedule(dev, &bnapi->napi);
3036 bnx2_interrupt(int irq, void *dev_instance)
3038 struct bnx2_napi *bnapi = dev_instance;
3039 struct bnx2 *bp = bnapi->bp;
3040 struct net_device *dev = bp->dev;
3041 struct status_block *sblk = bnapi->status_blk.msi;
3043 /* When using INTx, it is possible for the interrupt to arrive
3044 * at the CPU before the status block write posted prior to the
3045 * interrupt. Reading a register will flush the status block.
3046 * When using MSI, the MSI message will always complete after
3047 * the status block write.
3048 */
3049 if ((sblk->status_idx == bnapi->last_status_idx) &&
3050 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3051 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3054 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3055 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3056 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3058 /* Read back to deassert IRQ immediately to avoid too many
3059 * spurious interrupts.
3060 */
3061 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3063 /* Return here if interrupt is shared and is disabled. */
3064 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3067 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
3068 bnapi->last_status_idx = sblk->status_idx;
3069 __netif_rx_schedule(dev, &bnapi->napi);
3076 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3078 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3079 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3081 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3082 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3087 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3088 STATUS_ATTN_BITS_TIMER_ABORT)
3091 bnx2_has_work(struct bnx2_napi *bnapi)
3093 struct status_block *sblk = bnapi->status_blk.msi;
3095 if (bnx2_has_fast_work(bnapi))
3098 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3099 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3105 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3107 struct status_block *sblk = bnapi->status_blk.msi;
3108 u32 status_attn_bits = sblk->status_attn_bits;
3109 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3111 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3112 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3114 bnx2_phy_int(bp, bnapi);
3116 /* This is needed to take care of transient status
3117 * during link changes.
3118 */
3119 REG_WR(bp, BNX2_HC_COMMAND,
3120 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3121 REG_RD(bp, BNX2_HC_COMMAND);
3125 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3126 int work_done, int budget)
3128 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3129 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3131 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3132 bnx2_tx_int(bp, bnapi, 0);
3134 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3135 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3140 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3142 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3143 struct bnx2 *bp = bnapi->bp;
3145 struct status_block_msix *sblk = bnapi->status_blk.msix;
3148 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3149 if (unlikely(work_done >= budget))
3152 bnapi->last_status_idx = sblk->status_idx;
3153 /* status idx must be read before checking for more work. */
3154 rmb();
3155 if (likely(!bnx2_has_fast_work(bnapi))) {
3157 netif_rx_complete(bp->dev, napi);
3158 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3159 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3160 bnapi->last_status_idx);
3167 static int bnx2_poll(struct napi_struct *napi, int budget)
3169 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3170 struct bnx2 *bp = bnapi->bp;
3172 struct status_block *sblk = bnapi->status_blk.msi;
3175 bnx2_poll_link(bp, bnapi);
3177 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3179 if (unlikely(work_done >= budget))
3182 /* bnapi->last_status_idx is used below to tell the hw how
3183 * much work has been processed, so we must read it before
3184 * checking for more work.
3185 */
3186 bnapi->last_status_idx = sblk->status_idx;
3187 rmb();
3188 if (likely(!bnx2_has_work(bnapi))) {
3189 netif_rx_complete(bp->dev, napi);
3190 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3191 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3192 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3193 bnapi->last_status_idx);
3196 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3197 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3198 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3199 bnapi->last_status_idx);
3201 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3202 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3203 bnapi->last_status_idx);
3211 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3212 * from set_multicast.
3215 bnx2_set_rx_mode(struct net_device *dev)
3217 struct bnx2 *bp = netdev_priv(dev);
3218 u32 rx_mode, sort_mode;
3221 spin_lock_bh(&bp->phy_lock);
3223 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3224 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3225 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3227 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3228 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3230 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3231 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3233 if (dev->flags & IFF_PROMISC) {
3234 /* Promiscuous mode. */
3235 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3236 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3237 BNX2_RPM_SORT_USER0_PROM_VLAN;
3239 else if (dev->flags & IFF_ALLMULTI) {
3240 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3241 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3244 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3247 /* Accept one or more multicast addresses. */
3248 struct dev_mc_list *mclist;
3249 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3254 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3256 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3257 i++, mclist = mclist->next) {
3259 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3260 bit = crc & 0xff;
3261 regidx = (bit & 0xe0) >> 5;
3262 bit &= 0x1f;
3263 mc_filter[regidx] |= (1 << bit);
3266 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3267 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3271 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3274 if (rx_mode != bp->rx_mode) {
3275 bp->rx_mode = rx_mode;
3276 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3279 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3280 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3281 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3283 spin_unlock_bh(&bp->phy_lock);
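/* Download RV2P microcode. Each 64-bit instruction is written as
 * a high/low register pair and committed to the selected
 * processor's instruction RAM with an ADDR_CMD write. For the
 * 5709, PROC2's BD page size field is patched first.
 */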
3287 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3293 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3294 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3295 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3296 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3297 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3300 for (i = 0; i < rv2p_code_len; i += 8) {
3301 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3303 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3306 if (rv2p_proc == RV2P_PROC1) {
3307 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3308 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3311 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3312 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3316 /* Reset the processor; the un-stall is done later. */
3317 if (rv2p_proc == RV2P_PROC1) {
3318 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3321 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
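/* Generic on-chip CPU loader: halt the CPU, copy each firmware
 * section into the scratchpad through the indirect register window
 * (the text section is gzip-compressed and inflated first), clear
 * the prefetch instruction, set the PC to the entry point, and
 * clear the halt bit to start execution.
 */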
3326 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3333 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3334 val |= cpu_reg->mode_value_halt;
3335 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3336 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3338 /* Load the Text area. */
3339 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3343 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3348 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3349 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3353 /* Load the Data area. */
3354 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3358 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3359 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3363 /* Load the SBSS area. */
3364 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3368 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3369 bnx2_reg_wr_ind(bp, offset, 0);
3373 /* Load the BSS area. */
3374 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3378 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3379 bnx2_reg_wr_ind(bp, offset, 0);
3383 /* Load the Read-Only area. */
3384 offset = cpu_reg->spad_base +
3385 (fw->rodata_addr - cpu_reg->mips_view_base);
3389 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3390 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3394 /* Clear the pre-fetch instruction. */
3395 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3396 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3398 /* Start the CPU. */
3399 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3400 val &= ~cpu_reg->mode_value_halt;
3401 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3402 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3408 bnx2_init_cpus(struct bnx2 *bp)
3414 /* Initialize the RV2P processor. */
3415 text = vmalloc(FW_BUF_SIZE);
3418 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3419 rv2p = bnx2_xi_rv2p_proc1;
3420 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3422 rv2p = bnx2_rv2p_proc1;
3423 rv2p_len = sizeof(bnx2_rv2p_proc1);
3425 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3429 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3431 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3432 rv2p = bnx2_xi_rv2p_proc2;
3433 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3435 rv2p = bnx2_rv2p_proc2;
3436 rv2p_len = sizeof(bnx2_rv2p_proc2);
3438 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3442 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3444 /* Initialize the RX Processor. */
3445 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3446 fw = &bnx2_rxp_fw_09;
3448 fw = &bnx2_rxp_fw_06;
3451 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3455 /* Initialize the TX Processor. */
3456 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3457 fw = &bnx2_txp_fw_09;
3459 fw = &bnx2_txp_fw_06;
3462 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3466 /* Initialize the TX Patch-up Processor. */
3467 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3468 fw = &bnx2_tpat_fw_09;
3470 fw = &bnx2_tpat_fw_06;
3473 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3477 /* Initialize the Completion Processor. */
3478 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3479 fw = &bnx2_com_fw_09;
3481 fw = &bnx2_com_fw_06;
3484 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3488 /* Initialize the Command Processor. */
3489 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3490 fw = &bnx2_cp_fw_09;
3492 fw = &bnx2_cp_fw_06;
3495 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
3503 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3507 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3513 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3514 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3515 PCI_PM_CTRL_PME_STATUS);
3517 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3518 /* delay required during transition out of D3hot */
3521 val = REG_RD(bp, BNX2_EMAC_MODE);
3522 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3523 val &= ~BNX2_EMAC_MODE_MPKT;
3524 REG_WR(bp, BNX2_EMAC_MODE, val);
3526 val = REG_RD(bp, BNX2_RPM_CONFIG);
3527 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3528 REG_WR(bp, BNX2_RPM_CONFIG, val);
3539 autoneg = bp->autoneg;
3540 advertising = bp->advertising;
3542 if (bp->phy_port == PORT_TP) {
3543 bp->autoneg = AUTONEG_SPEED;
3544 bp->advertising = ADVERTISED_10baseT_Half |
3545 ADVERTISED_10baseT_Full |
3546 ADVERTISED_100baseT_Half |
3547 ADVERTISED_100baseT_Full |
3551 spin_lock_bh(&bp->phy_lock);
3552 bnx2_setup_phy(bp, bp->phy_port);
3553 spin_unlock_bh(&bp->phy_lock);
3555 bp->autoneg = autoneg;
3556 bp->advertising = advertising;
3558 bnx2_set_mac_addr(bp);
3560 val = REG_RD(bp, BNX2_EMAC_MODE);
3562 /* Enable port mode. */
3563 val &= ~BNX2_EMAC_MODE_PORT;
3564 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3565 BNX2_EMAC_MODE_ACPI_RCVD |
3566 BNX2_EMAC_MODE_MPKT;
3567 if (bp->phy_port == PORT_TP)
3568 val |= BNX2_EMAC_MODE_PORT_MII;
3570 val |= BNX2_EMAC_MODE_PORT_GMII;
3571 if (bp->line_speed == SPEED_2500)
3572 val |= BNX2_EMAC_MODE_25G_MODE;
3575 REG_WR(bp, BNX2_EMAC_MODE, val);
3577 /* receive all multicast */
3578 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3579 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3582 REG_WR(bp, BNX2_EMAC_RX_MODE,
3583 BNX2_EMAC_RX_MODE_SORT_MODE);
3585 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3586 BNX2_RPM_SORT_USER0_MC_EN;
3587 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3588 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3589 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3590 BNX2_RPM_SORT_USER0_ENA);
3592 /* Need to enable EMAC and RPM for WOL. */
3593 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3594 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3595 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3596 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3598 val = REG_RD(bp, BNX2_RPM_CONFIG);
3599 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3600 REG_WR(bp, BNX2_RPM_CONFIG, val);
3602 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3605 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3608 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3609 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3611 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3612 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3613 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3622 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3624 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3627 /* No more memory access after this point until
3628 * device is brought back to D0.
3629 */
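/* NVRAM access is arbitrated between the driver and the firmware.
 * The helpers below claim and release arbiter slot 2, polling for
 * the grant and giving up after NVRAM_TIMEOUT_COUNT iterations.
 */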
3640 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3645 /* Request access to the flash interface. */
3646 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3647 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3648 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3649 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3655 if (j >= NVRAM_TIMEOUT_COUNT)
3662 bnx2_release_nvram_lock(struct bnx2 *bp)
3667 /* Relinquish nvram interface. */
3668 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3670 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3671 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3672 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3678 if (j >= NVRAM_TIMEOUT_COUNT)
3686 bnx2_enable_nvram_write(struct bnx2 *bp)
3690 val = REG_RD(bp, BNX2_MISC_CFG);
3691 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3693 if (bp->flash_info->flags & BNX2_NV_WREN) {
3696 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3697 REG_WR(bp, BNX2_NVM_COMMAND,
3698 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3700 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3703 val = REG_RD(bp, BNX2_NVM_COMMAND);
3704 if (val & BNX2_NVM_COMMAND_DONE)
3708 if (j >= NVRAM_TIMEOUT_COUNT)
3715 bnx2_disable_nvram_write(struct bnx2 *bp)
3719 val = REG_RD(bp, BNX2_MISC_CFG);
3720 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3725 bnx2_enable_nvram_access(struct bnx2 *bp)
3729 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3730 /* Enable both bits, even on read. */
3731 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3732 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3736 bnx2_disable_nvram_access(struct bnx2 *bp)
3740 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3741 /* Disable both bits, even after read. */
3742 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3743 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3744 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3748 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3753 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3754 /* Buffered flash, no erase needed */
3757 /* Build an erase command */
3758 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3759 BNX2_NVM_COMMAND_DOIT;
3761 /* Need to clear DONE bit separately. */
3762 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3764 /* Address of the NVRAM page to erase. */
3765 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3767 /* Issue an erase command. */
3768 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3770 /* Wait for completion. */
3771 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3776 val = REG_RD(bp, BNX2_NVM_COMMAND);
3777 if (val & BNX2_NVM_COMMAND_DONE)
3781 if (j >= NVRAM_TIMEOUT_COUNT)
3788 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3793 /* Build the command word. */
3794 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3796 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3797 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3798 offset = ((offset / bp->flash_info->page_size) <<
3799 bp->flash_info->page_bits) +
3800 (offset % bp->flash_info->page_size);
3803 /* Need to clear DONE bit separately. */
3804 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3806 /* Address of the NVRAM to read from. */
3807 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3809 /* Issue a read command. */
3810 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3812 /* Wait for completion. */
3813 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3818 val = REG_RD(bp, BNX2_NVM_COMMAND);
3819 if (val & BNX2_NVM_COMMAND_DONE) {
3820 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3821 memcpy(ret_val, &v, 4);
3825 if (j >= NVRAM_TIMEOUT_COUNT)
3833 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3839 /* Build the command word. */
3840 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3842 /* Calculate the offset within a buffered flash; not needed for the 5709. */
3843 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3844 offset = ((offset / bp->flash_info->page_size) <<
3845 bp->flash_info->page_bits) +
3846 (offset % bp->flash_info->page_size);
3849 /* Need to clear DONE bit separately. */
3850 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3852 memcpy(&val32, val, 4);
3854 /* Write the data. */
3855 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3857 /* Address of the NVRAM to write to. */
3858 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3860 /* Issue the write command. */
3861 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3863 /* Wait for completion. */
3864 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3867 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3870 if (j >= NVRAM_TIMEOUT_COUNT)
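/* Identify the attached flash/EEPROM. The 5709 always uses
 * flash_5709 and reads the size from shared memory; other chips
 * match the strap bits in NVM_CFG1 against flash_table (using the
 * backup straps if the interface has been reconfigured) and
 * reprogram NVM_CFG1-3 and NVM_WRITE1 on a first-time match.
 */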
3877 bnx2_init_nvram(struct bnx2 *bp)
3880 int j, entry_count, rc = 0;
3881 struct flash_spec *flash;
3883 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3884 bp->flash_info = &flash_5709;
3885 goto get_flash_size;
3888 /* Determine the selected interface. */
3889 val = REG_RD(bp, BNX2_NVM_CFG1);
3891 entry_count = ARRAY_SIZE(flash_table);
3893 if (val & 0x40000000) {
3895 /* Flash interface has been reconfigured */
3896 for (j = 0, flash = &flash_table[0]; j < entry_count;
3898 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3899 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3900 bp->flash_info = flash;
3907 /* Not yet reconfigured */
3909 if (val & (1 << 23))
3910 mask = FLASH_BACKUP_STRAP_MASK;
3912 mask = FLASH_STRAP_MASK;
3914 for (j = 0, flash = &flash_table[0]; j < entry_count;
3917 if ((val & mask) == (flash->strapping & mask)) {
3918 bp->flash_info = flash;
3920 /* Request access to the flash interface. */
3921 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3924 /* Enable access to flash interface */
3925 bnx2_enable_nvram_access(bp);
3927 /* Reconfigure the flash interface */
3928 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3929 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3930 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3931 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3933 /* Disable access to flash interface */
3934 bnx2_disable_nvram_access(bp);
3935 bnx2_release_nvram_lock(bp);
3940 } /* if (val & 0x40000000) */
3942 if (j == entry_count) {
3943 bp->flash_info = NULL;
3944 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3949 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3950 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3952 bp->flash_size = val;
3954 bp->flash_size = bp->flash_info->total_size;
3960 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3964 u32 cmd_flags, offset32, len32, extra;
3969 /* Request access to the flash interface. */
3970 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3973 /* Enable access to flash interface */
3974 bnx2_enable_nvram_access(bp);
3987 pre_len = 4 - (offset & 3);
3989 if (pre_len >= len32) {
3991 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3992 BNX2_NVM_COMMAND_LAST;
3995 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3998 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4003 memcpy(ret_buf, buf + (offset & 3), pre_len);
4010 extra = 4 - (len32 & 3);
4011 len32 = (len32 + 4) & ~3;
4018 cmd_flags = BNX2_NVM_COMMAND_LAST;
4020 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4021 BNX2_NVM_COMMAND_LAST;
4023 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4025 memcpy(ret_buf, buf, 4 - extra);
4027 else if (len32 > 0) {
4030 /* Read the first word. */
4034 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4036 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4038 /* Advance to the next dword. */
4043 while (len32 > 4 && rc == 0) {
4044 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4046 /* Advance to the next dword. */
4055 cmd_flags = BNX2_NVM_COMMAND_LAST;
4056 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4058 memcpy(ret_buf, buf, 4 - extra);
4061 /* Disable access to flash interface */
4062 bnx2_disable_nvram_access(bp);
4064 bnx2_release_nvram_lock(bp);
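/* A minimal usage sketch for bnx2_nvram_read() (illustrative only;
 * the 0x100 offset is an assumed example, not taken from the
 * driver):
 *
 *	u8 buf[4];
 *	int rc = bnx2_nvram_read(bp, 0x100, buf, sizeof(buf));
 *
 * rc is nonzero if the NVRAM lock or command timed out. Unaligned
 * offsets and lengths are handled internally by reading whole
 * dwords and copying out only the requested bytes.
 */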
4070 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4073 u32 written, offset32, len32;
4074 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4076 int align_start, align_end;
4081 align_start = align_end = 0;
4083 if ((align_start = (offset32 & 3))) {
4085 len32 += align_start;
4088 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4093 align_end = 4 - (len32 & 3);
4095 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4099 if (align_start || align_end) {
4100 align_buf = kmalloc(len32, GFP_KERNEL);
4101 if (align_buf == NULL)
4104 memcpy(align_buf, start, 4);
4107 memcpy(align_buf + len32 - 4, end, 4);
4109 memcpy(align_buf + align_start, data_buf, buf_size);
4113 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4114 flash_buffer = kmalloc(264, GFP_KERNEL);
4115 if (flash_buffer == NULL) {
4117 goto nvram_write_end;
4122 while ((written < len32) && (rc == 0)) {
4123 u32 page_start, page_end, data_start, data_end;
4124 u32 addr, cmd_flags;
4127 /* Find the page_start addr */
4128 page_start = offset32 + written;
4129 page_start -= (page_start % bp->flash_info->page_size);
4130 /* Find the page_end addr */
4131 page_end = page_start + bp->flash_info->page_size;
4132 /* Find the data_start addr */
4133 data_start = (written == 0) ? offset32 : page_start;
4134 /* Find the data_end addr */
4135 data_end = (page_end > offset32 + len32) ?
4136 (offset32 + len32) : page_end;
4138 /* Request access to the flash interface. */
4139 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4140 goto nvram_write_end;
4142 /* Enable access to flash interface */
4143 bnx2_enable_nvram_access(bp);
4145 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4146 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4149 /* Read the whole page into the buffer
4150 * (non-buffered flash only) */
4151 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4152 if (j == (bp->flash_info->page_size - 4)) {
4153 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4155 rc = bnx2_nvram_read_dword(bp,
4161 goto nvram_write_end;
4167 /* Enable writes to flash interface (unlock write-protect) */
4168 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4169 goto nvram_write_end;
4171 /* Loop to write back the buffer data from page_start to
4172 * data_start */
4173 i = 0;
4174 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4175 /* Erase the page */
4176 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4177 goto nvram_write_end;
4179 /* Re-enable the write again for the actual write */
4180 bnx2_enable_nvram_write(bp);
4182 for (addr = page_start; addr < data_start;
4183 addr += 4, i += 4) {
4185 rc = bnx2_nvram_write_dword(bp, addr,
4186 &flash_buffer[i], cmd_flags);
4189 goto nvram_write_end;
4195 /* Loop to write the new data from data_start to data_end */
4196 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4197 if ((addr == page_end - 4) ||
4198 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4199 (addr == data_end - 4))) {
4201 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4203 rc = bnx2_nvram_write_dword(bp, addr, buf,
4207 goto nvram_write_end;
4213 /* Loop to write back the buffer data from data_end
4214 * to page_end */
4215 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4216 for (addr = data_end; addr < page_end;
4217 addr += 4, i += 4) {
4219 if (addr == page_end-4) {
4220 cmd_flags = BNX2_NVM_COMMAND_LAST;
4222 rc = bnx2_nvram_write_dword(bp, addr,
4223 &flash_buffer[i], cmd_flags);
4226 goto nvram_write_end;
4232 /* Disable writes to flash interface (lock write-protect) */
4233 bnx2_disable_nvram_write(bp);
4235 /* Disable access to flash interface */
4236 bnx2_disable_nvram_access(bp);
4237 bnx2_release_nvram_lock(bp);
4239 /* Increment written */
4240 written += data_end - data_start;
4244 kfree(flash_buffer);
4250 bnx2_init_remote_phy(struct bnx2 *bp)
4254 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4255 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4258 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4259 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4262 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4263 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4265 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4266 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4267 bp->phy_port = PORT_FIBRE;
4269 bp->phy_port = PORT_TP;
4271 if (netif_running(bp->dev)) {
4274 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4275 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4276 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4282 bnx2_setup_msix_tbl(struct bnx2 *bp)
4284 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4286 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4287 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
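/* Core reset sequence: quiesce DMA and host coalescing, handshake
 * with the firmware (WAIT0), deposit the soft-reset signature in
 * shared memory, then trigger the reset (MISC_COMMAND on the 5709,
 * the CORE_RST_REQ bit in PCICFG_MISC_CONFIG on older chips).
 * The CORE_RST bits are then polled, byte swapping is verified
 * against the SWAP_DIAG0 pattern, and a second handshake (WAIT1)
 * waits for firmware initialization to finish.
 */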
4291 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4297 /* Wait for the current PCI transaction to complete before
4298 * issuing a reset. */
4299 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4300 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4301 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4302 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4303 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4304 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4307 /* Wait for the firmware to tell us it is ok to issue a reset. */
4308 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4310 /* Deposit a driver reset signature so the firmware knows that
4311 * this is a soft reset. */
4312 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4313 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4315 /* Do a dummy read to force the chip to complete all current transactions
4316 * before we issue a reset. */
4317 val = REG_RD(bp, BNX2_MISC_ID);
4319 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4320 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4321 REG_RD(bp, BNX2_MISC_COMMAND);
4324 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4325 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4327 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4330 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4331 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4332 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4335 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4337 /* Reading back any register after chip reset will hang the
4338 * bus on 5706 A0 and A1. The msleep below provides plenty
4339 * of margin for write posting.
4340 */
4341 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4342 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4345 /* Reset takes approximately 30 usec */
4346 for (i = 0; i < 10; i++) {
4347 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4348 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4349 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4354 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4355 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4356 printk(KERN_ERR PFX "Chip reset did not complete\n");
4361 /* Make sure byte swapping is properly configured. */
4362 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4363 if (val != 0x01020304) {
4364 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4368 /* Wait for the firmware to finish its initialization. */
4369 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4373 spin_lock_bh(&bp->phy_lock);
4374 old_port = bp->phy_port;
4375 bnx2_init_remote_phy(bp);
4376 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4377 old_port != bp->phy_port)
4378 bnx2_set_default_remote_link(bp);
4379 spin_unlock_bh(&bp->phy_lock);
4381 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4382 /* Adjust the voltage regulator to two steps lower. The default
4383 * of this register is 0x0000000e. */
4384 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4386 /* Remove bad rbuf memory from the free pool. */
4387 rc = bnx2_alloc_bad_rbuf(bp);
4390 if (bp->flags & BNX2_FLAG_USING_MSIX)
4391 bnx2_setup_msix_tbl(bp);
4397 bnx2_init_chip(struct bnx2 *bp)
4402 /* Make sure the interrupt is not active. */
4403 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4405 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4406 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4408 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4410 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4411 DMA_READ_CHANS << 12 |
4412 DMA_WRITE_CHANS << 16;
4414 val |= (0x2 << 20) | (1 << 11);
4416 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4419 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4420 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4421 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4423 REG_WR(bp, BNX2_DMA_CONFIG, val);
4425 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4426 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4427 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4428 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4431 if (bp->flags & BNX2_FLAG_PCIX) {
4434 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4436 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4437 val16 & ~PCI_X_CMD_ERO);
4440 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4441 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4442 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4443 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4445 /* Initialize context mapping and zero out the quick contexts. The
4446 * context block must have already been enabled. */
4447 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4448 rc = bnx2_init_5709_context(bp);
4452 bnx2_init_context(bp);
4454 if ((rc = bnx2_init_cpus(bp)) != 0)
4457 bnx2_init_nvram(bp);
4459 bnx2_set_mac_addr(bp);
4461 val = REG_RD(bp, BNX2_MQ_CONFIG);
4462 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4463 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4464 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4465 val |= BNX2_MQ_CONFIG_HALT_DIS;
4467 REG_WR(bp, BNX2_MQ_CONFIG, val);
4469 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4470 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4471 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4473 val = (BCM_PAGE_BITS - 8) << 24;
4474 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4476 /* Configure page size. */
4477 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4478 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4479 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4480 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4482 val = bp->mac_addr[0] +
4483 (bp->mac_addr[1] << 8) +
4484 (bp->mac_addr[2] << 16) +
4485 bp->mac_addr[3] +
4486 (bp->mac_addr[4] << 8) +
4487 (bp->mac_addr[5] << 16);
4488 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4490 /* Program the MTU. Also include 4 bytes for CRC32. */
4491 val = bp->dev->mtu + ETH_HLEN + 4;
4492 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4493 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4494 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4496 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4497 bp->bnx2_napi[i].last_status_idx = 0;
4499 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4501 /* Set up how to generate a link change interrupt. */
4502 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4504 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4505 (u64) bp->status_blk_mapping & 0xffffffff);
4506 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4508 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4509 (u64) bp->stats_blk_mapping & 0xffffffff);
4510 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4511 (u64) bp->stats_blk_mapping >> 32);
4513 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4514 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4516 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4517 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4519 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4520 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4522 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4524 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4526 REG_WR(bp, BNX2_HC_COM_TICKS,
4527 (bp->com_ticks_int << 16) | bp->com_ticks);
4529 REG_WR(bp, BNX2_HC_CMD_TICKS,
4530 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4532 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4533 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4535 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4536 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4538 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4539 val = BNX2_HC_CONFIG_COLLECT_STATS;
4541 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4542 BNX2_HC_CONFIG_COLLECT_STATS;
4545 if (bp->irq_nvecs > 1) {
4546 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4547 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4549 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4552 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4553 val |= BNX2_HC_CONFIG_ONE_SHOT;
4555 REG_WR(bp, BNX2_HC_CONFIG, val);
4557 for (i = 1; i < bp->irq_nvecs; i++) {
4558 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4559 BNX2_HC_SB_CONFIG_1;
4562 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4563 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4564 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4566 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4567 (bp->tx_quick_cons_trip_int << 16) |
4568 bp->tx_quick_cons_trip);
4570 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4571 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4573 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4574 (bp->rx_quick_cons_trip_int << 16) |
4575 bp->rx_quick_cons_trip);
4577 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4578 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4581 /* Clear internal stats counters. */
4582 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4584 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4586 /* Initialize the receive filter. */
4587 bnx2_set_rx_mode(bp->dev);
4589 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4590 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4591 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4592 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4594 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4597 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4598 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4602 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4608 bnx2_clear_ring_states(struct bnx2 *bp)
4610 struct bnx2_napi *bnapi;
4611 struct bnx2_tx_ring_info *txr;
4612 struct bnx2_rx_ring_info *rxr;
4615 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4616 bnapi = &bp->bnx2_napi[i];
4617 txr = &bnapi->tx_ring;
4618 rxr = &bnapi->rx_ring;
4621 txr->hw_tx_cons = 0;
4622 rxr->rx_prod_bseq = 0;
4625 rxr->rx_pg_prod = 0;
4626 rxr->rx_pg_cons = 0;
4631 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4633 u32 val, offset0, offset1, offset2, offset3;
4634 u32 cid_addr = GET_CID_ADDR(cid);
4636 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4637 offset0 = BNX2_L2CTX_TYPE_XI;
4638 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4639 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4640 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4642 offset0 = BNX2_L2CTX_TYPE;
4643 offset1 = BNX2_L2CTX_CMD_TYPE;
4644 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4645 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4647 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4648 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4650 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4651 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4653 val = (u64) txr->tx_desc_mapping >> 32;
4654 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4656 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4657 bnx2_ctx_wr(bp, cid_addr, offset3, val);
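/* Set up one TX ring. The last BD of the ring is not a packet
 * descriptor; it is initialized below to point back at the start
 * of the ring, closing the circular chain.
 */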
4661 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4665 struct bnx2_napi *bnapi;
4666 struct bnx2_tx_ring_info *txr;
4668 bnapi = &bp->bnx2_napi[ring_num];
4669 txr = &bnapi->tx_ring;
4674 cid = TX_TSS_CID + ring_num - 1;
4676 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4678 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4680 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4681 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4684 txr->tx_prod_bseq = 0;
4686 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4687 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4689 bnx2_init_tx_context(bp, cid, txr);
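/* Initialize every BD in a set of rx descriptor pages with the
 * buffer size and START/END flags, then chain the pages together:
 * the last BD of each page points at the next page, and the last
 * page points back to the first.
 */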
4693 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4699 for (i = 0; i < num_rings; i++) {
4702 rxbd = &rx_ring[i][0];
4703 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4704 rxbd->rx_bd_len = buf_size;
4705 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4707 if (i == (num_rings - 1))
4711 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4712 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4717 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4720 u16 prod, ring_prod;
4721 u32 cid, rx_cid_addr, val;
4722 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4723 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4728 cid = RX_RSS_CID + ring_num - 1;
4730 rx_cid_addr = GET_CID_ADDR(cid);
4732 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4733 bp->rx_buf_use_size, bp->rx_max_ring);
4735 bnx2_init_rx_context(bp, cid);
4737 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4738 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4739 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4742 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4743 if (bp->rx_pg_ring_size) {
4744 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4745 rxr->rx_pg_desc_mapping,
4746 PAGE_SIZE, bp->rx_max_pg_ring);
4747 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4748 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4749 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4750 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4752 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4753 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4755 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4756 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4758 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4759 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4762 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4763 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4765 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4766 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4768 ring_prod = prod = rxr->rx_pg_prod;
4769 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4770 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4772 prod = NEXT_RX_BD(prod);
4773 ring_prod = RX_PG_RING_IDX(prod);
4775 rxr->rx_pg_prod = prod;
4777 ring_prod = prod = rxr->rx_prod;
4778 for (i = 0; i < bp->rx_ring_size; i++) {
4779 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4781 prod = NEXT_RX_BD(prod);
4782 ring_prod = RX_RING_IDX(prod);
4784 rxr->rx_prod = prod;
4786 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4787 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4788 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4790 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4791 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4793 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4797 bnx2_init_all_rings(struct bnx2 *bp)
4802 bnx2_clear_ring_states(bp);
4804 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4805 for (i = 0; i < bp->num_tx_rings; i++)
4806 bnx2_init_tx_ring(bp, i);
4808 if (bp->num_tx_rings > 1)
4809 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4812 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4813 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4815 for (i = 0; i < bp->num_rx_rings; i++)
4816 bnx2_init_rx_ring(bp, i);
4818 if (bp->num_rx_rings > 1) {
4820 u8 *tbl = (u8 *) &tbl_32;
4822 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
4823 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
4825 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
4826 tbl[i % 4] = i % (bp->num_rx_rings - 1);
4829 BNX2_RXP_SCRATCH_RSS_TBL + i,
4830 cpu_to_be32(tbl_32));
4833 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
4834 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
4836 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
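/* Work out how many descriptor pages a ring of ring_size entries
 * needs, then round that up to the next power of two. As a worked
 * example (values assumed for illustration): with MAX_RX_DESC_CNT
 * equal to 255, a requested ring_size of 600 spans three pages,
 * which rounds up to four.
 */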
4841 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4843 u32 max, num_rings = 1;
4845 while (ring_size > MAX_RX_DESC_CNT) {
4846 ring_size -= MAX_RX_DESC_CNT;
4849 /* round to next power of 2 */
4851 while ((max & num_rings) == 0)
4854 if (num_rings != max)
4861 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4863 u32 rx_size, rx_space, jumbo_size;
4865 /* 8 for CRC and VLAN */
4866 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4868 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4869 sizeof(struct skb_shared_info);
4871 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4872 bp->rx_pg_ring_size = 0;
4873 bp->rx_max_pg_ring = 0;
4874 bp->rx_max_pg_ring_idx = 0;
4875 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4876 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4878 jumbo_size = size * pages;
4879 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4880 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4882 bp->rx_pg_ring_size = jumbo_size;
4883 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4885 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4886 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4887 bp->rx_copy_thresh = 0;
4890 bp->rx_buf_use_size = rx_size;
4892 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4893 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4894 bp->rx_ring_size = size;
4895 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4896 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4900 bnx2_free_tx_skbs(struct bnx2 *bp)
4904 for (i = 0; i < bp->num_tx_rings; i++) {
4905 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4906 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4909 if (txr->tx_buf_ring == NULL)
4912 for (j = 0; j < TX_DESC_CNT; ) {
4913 struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4914 struct sk_buff *skb = tx_buf->skb;
4922 pci_unmap_single(bp->pdev,
4923 pci_unmap_addr(tx_buf, mapping),
4924 skb_headlen(skb), PCI_DMA_TODEVICE);
4928 last = skb_shinfo(skb)->nr_frags;
4929 for (k = 0; k < last; k++) {
4930 tx_buf = &txr->tx_buf_ring[j + k + 1];
4931 pci_unmap_page(bp->pdev,
4932 pci_unmap_addr(tx_buf, mapping),
4933 skb_shinfo(skb)->frags[k].size,
4943 bnx2_free_rx_skbs(struct bnx2 *bp)
4947 for (i = 0; i < bp->num_rx_rings; i++) {
4948 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4949 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4952 if (rxr->rx_buf_ring == NULL)
4955 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4956 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4957 struct sk_buff *skb = rx_buf->skb;
4962 pci_unmap_single(bp->pdev,
4963 pci_unmap_addr(rx_buf, mapping),
4964 bp->rx_buf_use_size,
4965 PCI_DMA_FROMDEVICE);
4971 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
4972 bnx2_free_rx_page(bp, rxr, j);
4977 bnx2_free_skbs(struct bnx2 *bp)
4979 bnx2_free_tx_skbs(bp);
4980 bnx2_free_rx_skbs(bp);
4984 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4988 rc = bnx2_reset_chip(bp, reset_code);
4993 if ((rc = bnx2_init_chip(bp)) != 0)
4996 bnx2_init_all_rings(bp);
5001 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5005 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5008 spin_lock_bh(&bp->phy_lock);
5009 bnx2_init_phy(bp, reset_phy);
5011 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5012 bnx2_remote_phy_event(bp);
5013 spin_unlock_bh(&bp->phy_lock);
5018 bnx2_test_registers(struct bnx2 *bp)
5022 static const struct {
5025 #define BNX2_FL_NOT_5709 1
5029 { 0x006c, 0, 0x00000000, 0x0000003f },
5030 { 0x0090, 0, 0xffffffff, 0x00000000 },
5031 { 0x0094, 0, 0x00000000, 0x00000000 },
5033 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5034 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5035 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5036 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5037 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5038 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5039 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5040 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5041 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5043 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5044 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5045 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5046 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5047 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5048 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5050 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5051 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5052 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5054 { 0x1000, 0, 0x00000000, 0x00000001 },
5055 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5057 { 0x1408, 0, 0x01c00800, 0x00000000 },
5058 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5059 { 0x14a8, 0, 0x00000000, 0x000001ff },
5060 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5061 { 0x14b0, 0, 0x00000002, 0x00000001 },
5062 { 0x14b8, 0, 0x00000000, 0x00000000 },
5063 { 0x14c0, 0, 0x00000000, 0x00000009 },
5064 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5065 { 0x14cc, 0, 0x00000000, 0x00000001 },
5066 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5068 { 0x1800, 0, 0x00000000, 0x00000001 },
5069 { 0x1804, 0, 0x00000000, 0x00000003 },
5071 { 0x2800, 0, 0x00000000, 0x00000001 },
5072 { 0x2804, 0, 0x00000000, 0x00003f01 },
5073 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5074 { 0x2810, 0, 0xffff0000, 0x00000000 },
5075 { 0x2814, 0, 0xffff0000, 0x00000000 },
5076 { 0x2818, 0, 0xffff0000, 0x00000000 },
5077 { 0x281c, 0, 0xffff0000, 0x00000000 },
5078 { 0x2834, 0, 0xffffffff, 0x00000000 },
5079 { 0x2840, 0, 0x00000000, 0xffffffff },
5080 { 0x2844, 0, 0x00000000, 0xffffffff },
5081 { 0x2848, 0, 0xffffffff, 0x00000000 },
5082 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5084 { 0x2c00, 0, 0x00000000, 0x00000011 },
5085 { 0x2c04, 0, 0x00000000, 0x00030007 },
5087 { 0x3c00, 0, 0x00000000, 0x00000001 },
5088 { 0x3c04, 0, 0x00000000, 0x00070000 },
5089 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5090 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5091 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5092 { 0x3c14, 0, 0x00000000, 0xffffffff },
5093 { 0x3c18, 0, 0x00000000, 0xffffffff },
5094 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5095 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5097 { 0x5004, 0, 0x00000000, 0x0000007f },
5098 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5100 { 0x5c00, 0, 0x00000000, 0x00000001 },
5101 { 0x5c04, 0, 0x00000000, 0x0003000f },
5102 { 0x5c08, 0, 0x00000003, 0x00000000 },
5103 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5104 { 0x5c10, 0, 0x00000000, 0xffffffff },
5105 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5106 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5107 { 0x5c88, 0, 0x00000000, 0x00077373 },
5108 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5110 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5111 { 0x680c, 0, 0xffffffff, 0x00000000 },
5112 { 0x6810, 0, 0xffffffff, 0x00000000 },
5113 { 0x6814, 0, 0xffffffff, 0x00000000 },
5114 { 0x6818, 0, 0xffffffff, 0x00000000 },
5115 { 0x681c, 0, 0xffffffff, 0x00000000 },
5116 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5117 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5118 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5119 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5120 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5121 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5122 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5123 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5124 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5125 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5126 { 0x684c, 0, 0xffffffff, 0x00000000 },
5127 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5128 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5129 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5130 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5131 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5132 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5134 { 0xffff, 0, 0x00000000, 0x00000000 },
5139 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5142 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5143 u32 offset, rw_mask, ro_mask, save_val, val;
5144 u16 flags = reg_tbl[i].flags;
5146 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5149 offset = (u32) reg_tbl[i].offset;
5150 rw_mask = reg_tbl[i].rw_mask;
5151 ro_mask = reg_tbl[i].ro_mask;
5153 save_val = readl(bp->regview + offset);
5155 writel(0, bp->regview + offset);
5157 val = readl(bp->regview + offset);
5158 if ((val & rw_mask) != 0) {
5162 if ((val & ro_mask) != (save_val & ro_mask)) {
5166 writel(0xffffffff, bp->regview + offset);
5168 val = readl(bp->regview + offset);
5169 if ((val & rw_mask) != rw_mask) {
5173 if ((val & ro_mask) != (save_val & ro_mask)) {
5177 writel(save_val, bp->regview + offset);
5181 writel(save_val, bp->regview + offset);
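/* Each table entry above is exercised both ways: after writing 0, no
 * rw_mask bit may read back set; after writing 0xffffffff, every rw_mask
 * bit must read back set; and in both cases the ro_mask bits must still
 * match the saved value.  For instance, an entry with rw_mask 0x0000ffff
 * and ro_mask 0xffff0000 passes only if the low half tracks the writes
 * while the high half never changes. */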
5189 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5191 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5192 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5195 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5198 for (offset = 0; offset < size; offset += 4) {
5200 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5202 if (bnx2_reg_rd_ind(bp, start + offset) !=
5212 bnx2_test_memory(struct bnx2 *bp)
5216 static struct mem_entry {
5219 } mem_tbl_5706[] = {
5220 { 0x60000, 0x4000 },
5221 { 0xa0000, 0x3000 },
5222 { 0xe0000, 0x4000 },
5223 { 0x120000, 0x4000 },
5224 { 0x1a0000, 0x4000 },
5225 { 0x160000, 0x4000 },
5229 { 0x60000, 0x4000 },
5230 { 0xa0000, 0x3000 },
5231 { 0xe0000, 0x4000 },
5232 { 0x120000, 0x4000 },
5233 { 0x1a0000, 0x4000 },
5236 struct mem_entry *mem_tbl;
5238 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5239 mem_tbl = mem_tbl_5709;
5241 mem_tbl = mem_tbl_5706;
5243 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5244 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5245 mem_tbl[i].len)) != 0) {
5253 #define BNX2_MAC_LOOPBACK 0
5254 #define BNX2_PHY_LOOPBACK 1
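/* The loopback test below builds one self-addressed frame, posts it on
 * the TX ring, and kicks the host-coalescing block (COAL_NOW_WO_INT) so
 * the TX and RX completions land without interrupts.  The received copy
 * is then checked three ways: the l2_fhdr error bits, the frame length
 * (pkt_size plus the 4-byte CRC), and a byte-for-byte compare of the
 * (i & 0xff) payload pattern written at transmit time. */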
5257 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5259 unsigned int pkt_size, num_pkts, i;
5260 struct sk_buff *skb, *rx_skb;
5261 unsigned char *packet;
5262 u16 rx_start_idx, rx_idx;
5265 struct sw_bd *rx_buf;
5266 struct l2_fhdr *rx_hdr;
5268 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5269 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5270 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5274 txr = &tx_napi->tx_ring;
5275 rxr = &bnapi->rx_ring;
5276 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5277 bp->loopback = MAC_LOOPBACK;
5278 bnx2_set_mac_loopback(bp);
5280 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5281 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5284 bp->loopback = PHY_LOOPBACK;
5285 bnx2_set_phy_loopback(bp);
5290 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5291 skb = netdev_alloc_skb(bp->dev, pkt_size);
5294 packet = skb_put(skb, pkt_size);
5295 memcpy(packet, bp->dev->dev_addr, 6);
5296 memset(packet + 6, 0x0, 8);
5297 for (i = 14; i < pkt_size; i++)
5298 packet[i] = (unsigned char) (i & 0xff);
5300 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5303 REG_WR(bp, BNX2_HC_COMMAND,
5304 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5306 REG_RD(bp, BNX2_HC_COMMAND);
5309 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5313 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5315 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5316 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5317 txbd->tx_bd_mss_nbytes = pkt_size;
5318 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5321 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5322 txr->tx_prod_bseq += pkt_size;
5324 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5325 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5329 REG_WR(bp, BNX2_HC_COMMAND,
5330 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5332 REG_RD(bp, BNX2_HC_COMMAND);
5336 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5339 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5340 goto loopback_test_done;
5342 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5343 if (rx_idx != rx_start_idx + num_pkts) {
5344 goto loopback_test_done;
5347 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5348 rx_skb = rx_buf->skb;
5350 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5351 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5353 pci_dma_sync_single_for_cpu(bp->pdev,
5354 pci_unmap_addr(rx_buf, mapping),
5355 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5357 if (rx_hdr->l2_fhdr_status &
5358 (L2_FHDR_ERRORS_BAD_CRC |
5359 L2_FHDR_ERRORS_PHY_DECODE |
5360 L2_FHDR_ERRORS_ALIGNMENT |
5361 L2_FHDR_ERRORS_TOO_SHORT |
5362 L2_FHDR_ERRORS_GIANT_FRAME)) {
5364 goto loopback_test_done;
5367 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5368 goto loopback_test_done;
5371 for (i = 14; i < pkt_size; i++) {
5372 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5373 goto loopback_test_done;
5384 #define BNX2_MAC_LOOPBACK_FAILED 1
5385 #define BNX2_PHY_LOOPBACK_FAILED 2
5386 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5387 BNX2_PHY_LOOPBACK_FAILED)
5390 bnx2_test_loopback(struct bnx2 *bp)
5394 if (!netif_running(bp->dev))
5395 return BNX2_LOOPBACK_FAILED;
5397 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5398 spin_lock_bh(&bp->phy_lock);
5399 bnx2_init_phy(bp, 1);
5400 spin_unlock_bh(&bp->phy_lock);
5401 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5402 rc |= BNX2_MAC_LOOPBACK_FAILED;
5403 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5404 rc |= BNX2_PHY_LOOPBACK_FAILED;
5408 #define NVRAM_SIZE 0x200
5409 #define CRC32_RESIDUAL 0xdebb20e3
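/* CRC32 has the property that running ether_crc_le() over a block that
 * ends with its own stored little-endian CRC yields the constant residual
 * 0xdebb20e3 when the block is intact.  bnx2_test_nvram() below relies on
 * this: each of the two 0x100-byte NVRAM blocks it reads is verified by
 * comparing against CRC32_RESIDUAL instead of recomputing and matching
 * the stored checksum. */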
5412 bnx2_test_nvram(struct bnx2 *bp)
5414 __be32 buf[NVRAM_SIZE / 4];
5415 u8 *data = (u8 *) buf;
5419 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5420 goto test_nvram_done;
5422 magic = be32_to_cpu(buf[0]);
5423 if (magic != 0x669955aa) {
5425 goto test_nvram_done;
5428 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5429 goto test_nvram_done;
5431 csum = ether_crc_le(0x100, data);
5432 if (csum != CRC32_RESIDUAL) {
5434 goto test_nvram_done;
5437 csum = ether_crc_le(0x100, data + 0x100);
5438 if (csum != CRC32_RESIDUAL) {
5447 bnx2_test_link(struct bnx2 *bp)
5451 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5456 spin_lock_bh(&bp->phy_lock);
5457 bnx2_enable_bmsr1(bp);
5458 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5459 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5460 bnx2_disable_bmsr1(bp);
5461 spin_unlock_bh(&bp->phy_lock);
5463 if (bmsr & BMSR_LSTATUS) {
5470 bnx2_test_intr(struct bnx2 *bp)
5475 if (!netif_running(bp->dev))
5478 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5480 /* This register is not touched during run-time. */
5481 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5482 REG_RD(bp, BNX2_HC_COMMAND);
5484 for (i = 0; i < 10; i++) {
5485 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5491 msleep_interruptible(10);
5499 /* Determine link state for parallel detection. */
5501 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5503 u32 mode_ctl, an_dbg, exp;
5505 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5508 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5509 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5511 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5514 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5515 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5516 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
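/* Latched status: the first read of a MISC_SHADOW status register
 * returns the stale latched value and clears it, so only the second
 * read reflects the current state.  The same doubled-read pattern is
 * used for the expansion register below and for the BMSR reads in
 * bnx2_test_link(). */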
5518 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5521 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5522 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5523 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5525 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5532 bnx2_5706_serdes_timer(struct bnx2 *bp)
5536 spin_lock(&bp->phy_lock);
5537 if (bp->serdes_an_pending) {
5538 bp->serdes_an_pending--;
5540 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5543 bp->current_interval = bp->timer_interval;
5545 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5547 if (bmcr & BMCR_ANENABLE) {
5548 if (bnx2_5706_serdes_has_link(bp)) {
5549 bmcr &= ~BMCR_ANENABLE;
5550 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5551 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5552 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5556 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5557 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5560 bnx2_write_phy(bp, 0x17, 0x0f01);
5561 bnx2_read_phy(bp, 0x15, &phy2);
5565 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5566 bmcr |= BMCR_ANENABLE;
5567 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5569 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5572 bp->current_interval = bp->timer_interval;
5577 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5578 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5579 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5581 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5582 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5583 bnx2_5706s_force_link_dn(bp, 1);
5584 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5587 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5590 spin_unlock(&bp->phy_lock);
5594 bnx2_5708_serdes_timer(struct bnx2 *bp)
5596 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5599 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5600 bp->serdes_an_pending = 0;
5604 spin_lock(&bp->phy_lock);
5605 if (bp->serdes_an_pending)
5606 bp->serdes_an_pending--;
5607 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5610 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5611 if (bmcr & BMCR_ANENABLE) {
5612 bnx2_enable_forced_2g5(bp);
5613 bp->current_interval = SERDES_FORCED_TIMEOUT;
5615 bnx2_disable_forced_2g5(bp);
5616 bp->serdes_an_pending = 2;
5617 bp->current_interval = bp->timer_interval;
5621 bp->current_interval = bp->timer_interval;
5623 spin_unlock(&bp->phy_lock);
5627 bnx2_timer(unsigned long data)
5629 struct bnx2 *bp = (struct bnx2 *) data;
5631 if (!netif_running(bp->dev))
5634 if (atomic_read(&bp->intr_sem) != 0)
5635 goto bnx2_restart_timer;
5637 bnx2_send_heart_beat(bp);
5639 bp->stats_blk->stat_FwRxDrop =
5640 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5642 /* work around occasionally corrupted counters */
5643 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5644 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5645 BNX2_HC_COMMAND_STATS_NOW);
5647 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5648 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5649 bnx2_5706_serdes_timer(bp);
5651 bnx2_5708_serdes_timer(bp);
5655 mod_timer(&bp->timer, jiffies + bp->current_interval);
5659 bnx2_request_irq(struct bnx2 *bp)
5661 unsigned long flags;
5662 struct bnx2_irq *irq;
5665 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5668 flags = IRQF_SHARED;
5670 for (i = 0; i < bp->irq_nvecs; i++) {
5671 irq = &bp->irq_tbl[i];
5672 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5682 bnx2_free_irq(struct bnx2 *bp)
5684 struct bnx2_irq *irq;
5687 for (i = 0; i < bp->irq_nvecs; i++) {
5688 irq = &bp->irq_tbl[i];
5690 free_irq(irq->vector, &bp->bnx2_napi[i]);
5693 if (bp->flags & BNX2_FLAG_USING_MSI)
5694 pci_disable_msi(bp->pdev);
5695 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5696 pci_disable_msix(bp->pdev);
5698 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5702 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5705 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5707 bnx2_setup_msix_tbl(bp);
5708 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5709 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5710 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5712 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5713 msix_ent[i].entry = i;
5714 msix_ent[i].vector = 0;
5716 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5717 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5720 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5724 bp->irq_nvecs = msix_vecs;
5725 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5726 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5727 bp->irq_tbl[i].vector = msix_ent[i].vector;
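/* Sketch of how the table above is consumed (assuming the elided error
 * check leaves this path only when pci_enable_msix() returned 0):
 * msix_ent[].vector now holds the kernel-assigned IRQ numbers for all
 * BNX2_MAX_MSIX_VEC entries, and copying them into bp->irq_tbl[] lets
 * bnx2_request_irq() attach bnx2_msi_1shot to each vector with one
 * bnx2_napi context apiece. */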
5731 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5733 int cpus = num_online_cpus();
5734 int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
5736 bp->irq_tbl[0].handler = bnx2_interrupt;
5737 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5739 bp->irq_tbl[0].vector = bp->pdev->irq;
5741 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5742 bnx2_enable_msix(bp, msix_vecs);
5744 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5745 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5746 if (pci_enable_msi(bp->pdev) == 0) {
5747 bp->flags |= BNX2_FLAG_USING_MSI;
5748 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5749 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5750 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5752 bp->irq_tbl[0].handler = bnx2_msi;
5754 bp->irq_tbl[0].vector = bp->pdev->irq;
5757 bp->num_tx_rings = 1;
5758 bp->num_rx_rings = bp->irq_nvecs;
5761 /* Called with rtnl_lock */
5763 bnx2_open(struct net_device *dev)
5765 struct bnx2 *bp = netdev_priv(dev);
5768 netif_carrier_off(dev);
5770 bnx2_set_power_state(bp, PCI_D0);
5771 bnx2_disable_int(bp);
5773 bnx2_setup_int_mode(bp, disable_msi);
5774 bnx2_napi_enable(bp);
5775 rc = bnx2_alloc_mem(bp);
5779 rc = bnx2_request_irq(bp);
5783 rc = bnx2_init_nic(bp, 1);
5787 mod_timer(&bp->timer, jiffies + bp->current_interval);
5789 atomic_set(&bp->intr_sem, 0);
5791 bnx2_enable_int(bp);
5793 if (bp->flags & BNX2_FLAG_USING_MSI) {
5794 /* Test MSI to make sure it is working.
5795 * If the MSI test fails, fall back to INTx mode.
5797 if (bnx2_test_intr(bp) != 0) {
5798 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5799 " using MSI, switching to INTx mode. Please"
5800 " report this failure to the PCI maintainer"
5801 " and include system chipset information.\n",
5804 bnx2_disable_int(bp);
5807 bnx2_setup_int_mode(bp, 1);
5809 rc = bnx2_init_nic(bp, 0);
5812 rc = bnx2_request_irq(bp);
5815 del_timer_sync(&bp->timer);
5818 bnx2_enable_int(bp);
5821 if (bp->flags & BNX2_FLAG_USING_MSI)
5822 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5823 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5824 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5826 netif_start_queue(dev);
5831 bnx2_napi_disable(bp);
5839 bnx2_reset_task(struct work_struct *work)
5841 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5843 if (!netif_running(bp->dev))
5846 bnx2_netif_stop(bp);
5848 bnx2_init_nic(bp, 1);
5850 atomic_set(&bp->intr_sem, 1);
5851 bnx2_netif_start(bp);
5855 bnx2_tx_timeout(struct net_device *dev)
5857 struct bnx2 *bp = netdev_priv(dev);
5859 /* This allows the netif to be shut down gracefully before resetting */
5860 schedule_work(&bp->reset_task);
5864 /* Called with rtnl_lock */
5866 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5868 struct bnx2 *bp = netdev_priv(dev);
5870 bnx2_netif_stop(bp);
5873 bnx2_set_rx_mode(dev);
5875 bnx2_netif_start(bp);
5879 /* Called with netif_tx_lock.
5880 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5881 * netif_wake_queue().
5884 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5886 struct bnx2 *bp = netdev_priv(dev);
5889 struct sw_bd *tx_buf;
5890 u32 len, vlan_tag_flags, last_frag, mss;
5891 u16 prod, ring_prod;
5893 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
5894 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5896 if (unlikely(bnx2_tx_avail(bp, txr) <
5897 (skb_shinfo(skb)->nr_frags + 1))) {
5898 netif_stop_queue(dev);
5899 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5902 return NETDEV_TX_BUSY;
5904 len = skb_headlen(skb);
5905 prod = txr->tx_prod;
5906 ring_prod = TX_RING_IDX(prod);
5909 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5910 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5913 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5915 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5917 if ((mss = skb_shinfo(skb)->gso_size)) {
5918 u32 tcp_opt_len, ip_tcp_len;
5921 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5923 tcp_opt_len = tcp_optlen(skb);
5925 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5926 u32 tcp_off = skb_transport_offset(skb) -
5927 sizeof(struct ipv6hdr) - ETH_HLEN;
5929 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5930 TX_BD_FLAGS_SW_FLAGS;
5931 if (likely(tcp_off == 0))
5932 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5935 vlan_tag_flags |= ((tcp_off & 0x3) <<
5936 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5937 ((tcp_off & 0x10) <<
5938 TX_BD_FLAGS_TCP6_OFF4_SHL);
5939 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5942 if (skb_header_cloned(skb) &&
5943 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5945 return NETDEV_TX_OK;
5948 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5952 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5953 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5957 if (tcp_opt_len || (iph->ihl > 5)) {
5958 vlan_tag_flags |= ((iph->ihl - 5) +
5959 (tcp_opt_len >> 2)) << 8;
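/* For IPv4 LSO the driver seeds tcp_hdr()->check with a pseudo-header
 * checksum over saddr/daddr and a zero length (the ~csum_tcpudp_magic()
 * call above); the chip folds in the real TCP length as it replicates
 * the headers for each mss-sized segment.  tot_len is primed for the
 * first segment, and the extra header length in 32-bit words (IP plus
 * TCP options) rides in bits 8 and up of the BD flags so the hardware
 * knows how much header to copy per segment. */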
5965 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5967 tx_buf = &txr->tx_buf_ring[ring_prod];
5969 pci_unmap_addr_set(tx_buf, mapping, mapping);
5971 txbd = &txr->tx_desc_ring[ring_prod];
5973 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5974 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5975 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5976 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5978 last_frag = skb_shinfo(skb)->nr_frags;
5980 for (i = 0; i < last_frag; i++) {
5981 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5983 prod = NEXT_TX_BD(prod);
5984 ring_prod = TX_RING_IDX(prod);
5985 txbd = &txr->tx_desc_ring[ring_prod];
5988 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5989 len, PCI_DMA_TODEVICE);
5990 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
5993 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5994 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5995 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5996 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5999 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6001 prod = NEXT_TX_BD(prod);
6002 txr->tx_prod_bseq += skb->len;
6004 REG_WR16(bp, txr->tx_bidx_addr, prod);
6005 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6009 txr->tx_prod = prod;
6010 dev->trans_start = jiffies;
6012 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6013 netif_stop_queue(dev);
6014 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6015 netif_wake_queue(dev);
6018 return NETDEV_TX_OK;
6021 /* Called with rtnl_lock */
6023 bnx2_close(struct net_device *dev)
6025 struct bnx2 *bp = netdev_priv(dev);
6028 cancel_work_sync(&bp->reset_task);
6030 bnx2_disable_int_sync(bp);
6031 bnx2_napi_disable(bp);
6032 del_timer_sync(&bp->timer);
6033 if (bp->flags & BNX2_FLAG_NO_WOL)
6034 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6036 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6038 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6039 bnx2_reset_chip(bp, reset_code);
6044 netif_carrier_off(bp->dev);
6045 bnx2_set_power_state(bp, PCI_D3hot);
6049 #define GET_NET_STATS64(ctr) \
6050 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6051 (unsigned long) (ctr##_lo)
6053 #define GET_NET_STATS32(ctr) \
6056 #if (BITS_PER_LONG == 64)
6057 #define GET_NET_STATS GET_NET_STATS64
6059 #define GET_NET_STATS GET_NET_STATS32
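/* Example: hardware keeps each 64-bit counter as a hi/lo pair of 32-bit
 * words, so on a 64-bit kernel GET_NET_STATS(stats_blk->stat_IfHCInOctets)
 * expands to ((hi << 32) + lo), while the 32-bit variant (its body is
 * elided above) simply takes the _lo word and lets the counter wrap, as
 * unsigned-long ifconfig statistics traditionally do. */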
6062 static struct net_device_stats *
6063 bnx2_get_stats(struct net_device *dev)
6065 struct bnx2 *bp = netdev_priv(dev);
6066 struct statistics_block *stats_blk = bp->stats_blk;
6067 struct net_device_stats *net_stats = &bp->net_stats;
6069 if (bp->stats_blk == NULL) {
6072 net_stats->rx_packets =
6073 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6074 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6075 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6077 net_stats->tx_packets =
6078 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6079 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6080 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6082 net_stats->rx_bytes =
6083 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6085 net_stats->tx_bytes =
6086 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6088 net_stats->multicast =
6089 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6091 net_stats->collisions =
6092 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6094 net_stats->rx_length_errors =
6095 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6096 stats_blk->stat_EtherStatsOverrsizePkts);
6098 net_stats->rx_over_errors =
6099 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6101 net_stats->rx_frame_errors =
6102 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6104 net_stats->rx_crc_errors =
6105 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6107 net_stats->rx_errors = net_stats->rx_length_errors +
6108 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6109 net_stats->rx_crc_errors;
6111 net_stats->tx_aborted_errors =
6112 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6113 stats_blk->stat_Dot3StatsLateCollisions);
6115 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6116 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6117 net_stats->tx_carrier_errors = 0;
6119 net_stats->tx_carrier_errors =
6121 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6124 net_stats->tx_errors =
6126 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6128 net_stats->tx_aborted_errors +
6129 net_stats->tx_carrier_errors;
6131 net_stats->rx_missed_errors =
6132 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6133 stats_blk->stat_FwRxDrop);
6138 /* All ethtool functions called with rtnl_lock */
6141 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6143 struct bnx2 *bp = netdev_priv(dev);
6144 int support_serdes = 0, support_copper = 0;
6146 cmd->supported = SUPPORTED_Autoneg;
6147 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6150 } else if (bp->phy_port == PORT_FIBRE)
6155 if (support_serdes) {
6156 cmd->supported |= SUPPORTED_1000baseT_Full |
6158 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6159 cmd->supported |= SUPPORTED_2500baseX_Full;
6162 if (support_copper) {
6163 cmd->supported |= SUPPORTED_10baseT_Half |
6164 SUPPORTED_10baseT_Full |
6165 SUPPORTED_100baseT_Half |
6166 SUPPORTED_100baseT_Full |
6167 SUPPORTED_1000baseT_Full |
6172 spin_lock_bh(&bp->phy_lock);
6173 cmd->port = bp->phy_port;
6174 cmd->advertising = bp->advertising;
6176 if (bp->autoneg & AUTONEG_SPEED) {
6177 cmd->autoneg = AUTONEG_ENABLE;
6180 cmd->autoneg = AUTONEG_DISABLE;
6183 if (netif_carrier_ok(dev)) {
6184 cmd->speed = bp->line_speed;
6185 cmd->duplex = bp->duplex;
6191 spin_unlock_bh(&bp->phy_lock);
6193 cmd->transceiver = XCVR_INTERNAL;
6194 cmd->phy_address = bp->phy_addr;
6200 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6202 struct bnx2 *bp = netdev_priv(dev);
6203 u8 autoneg = bp->autoneg;
6204 u8 req_duplex = bp->req_duplex;
6205 u16 req_line_speed = bp->req_line_speed;
6206 u32 advertising = bp->advertising;
6209 spin_lock_bh(&bp->phy_lock);
6211 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6212 goto err_out_unlock;
6214 if (cmd->port != bp->phy_port &&
6215 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6216 goto err_out_unlock;
6218 if (cmd->autoneg == AUTONEG_ENABLE) {
6219 autoneg |= AUTONEG_SPEED;
6221 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6223 /* allow advertising a single speed */
6224 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6225 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6226 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6227 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6229 if (cmd->port == PORT_FIBRE)
6230 goto err_out_unlock;
6232 advertising = cmd->advertising;
6234 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6235 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6236 (cmd->port == PORT_TP))
6237 goto err_out_unlock;
6238 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6239 advertising = cmd->advertising;
6240 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6241 goto err_out_unlock;
6243 if (cmd->port == PORT_FIBRE)
6244 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6246 advertising = ETHTOOL_ALL_COPPER_SPEED;
6248 advertising |= ADVERTISED_Autoneg;
6251 if (cmd->port == PORT_FIBRE) {
6252 if ((cmd->speed != SPEED_1000 &&
6253 cmd->speed != SPEED_2500) ||
6254 (cmd->duplex != DUPLEX_FULL))
6255 goto err_out_unlock;
6257 if (cmd->speed == SPEED_2500 &&
6258 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6259 goto err_out_unlock;
6261 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6262 goto err_out_unlock;
6264 autoneg &= ~AUTONEG_SPEED;
6265 req_line_speed = cmd->speed;
6266 req_duplex = cmd->duplex;
6270 bp->autoneg = autoneg;
6271 bp->advertising = advertising;
6272 bp->req_line_speed = req_line_speed;
6273 bp->req_duplex = req_duplex;
6275 err = bnx2_setup_phy(bp, cmd->port);
6278 spin_unlock_bh(&bp->phy_lock);
6284 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6286 struct bnx2 *bp = netdev_priv(dev);
6288 strcpy(info->driver, DRV_MODULE_NAME);
6289 strcpy(info->version, DRV_MODULE_VERSION);
6290 strcpy(info->bus_info, pci_name(bp->pdev));
6291 strcpy(info->fw_version, bp->fw_version);
6294 #define BNX2_REGDUMP_LEN (32 * 1024)
6297 bnx2_get_regs_len(struct net_device *dev)
6299 return BNX2_REGDUMP_LEN;
6303 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6305 u32 *p = _p, i, offset;
6307 struct bnx2 *bp = netdev_priv(dev);
6308 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6309 0x0800, 0x0880, 0x0c00, 0x0c10,
6310 0x0c30, 0x0d08, 0x1000, 0x101c,
6311 0x1040, 0x1048, 0x1080, 0x10a4,
6312 0x1400, 0x1490, 0x1498, 0x14f0,
6313 0x1500, 0x155c, 0x1580, 0x15dc,
6314 0x1600, 0x1658, 0x1680, 0x16d8,
6315 0x1800, 0x1820, 0x1840, 0x1854,
6316 0x1880, 0x1894, 0x1900, 0x1984,
6317 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6318 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6319 0x2000, 0x2030, 0x23c0, 0x2400,
6320 0x2800, 0x2820, 0x2830, 0x2850,
6321 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6322 0x3c00, 0x3c94, 0x4000, 0x4010,
6323 0x4080, 0x4090, 0x43c0, 0x4458,
6324 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6325 0x4fc0, 0x5010, 0x53c0, 0x5444,
6326 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6327 0x5fc0, 0x6000, 0x6400, 0x6428,
6328 0x6800, 0x6848, 0x684c, 0x6860,
6329 0x6888, 0x6910, 0x8000 };
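/* reg_boundaries[] lists alternating start/end offsets of the readable
 * register windows.  The loop below reads from one boundary to the next,
 * then jumps ahead to the following start, leaving the zero fill from the
 * memset in place of unimplemented or side-effecting offsets, until the
 * final 0x8000 sentinel ends the 32 KB dump. */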
6333 memset(p, 0, BNX2_REGDUMP_LEN);
6335 if (!netif_running(bp->dev))
6339 offset = reg_boundaries[0];
6341 while (offset < BNX2_REGDUMP_LEN) {
6342 *p++ = REG_RD(bp, offset);
6344 if (offset == reg_boundaries[i + 1]) {
6345 offset = reg_boundaries[i + 2];
6346 p = (u32 *) (orig_p + offset);
6353 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6355 struct bnx2 *bp = netdev_priv(dev);
6357 if (bp->flags & BNX2_FLAG_NO_WOL) {
6362 wol->supported = WAKE_MAGIC;
6364 wol->wolopts = WAKE_MAGIC;
6368 memset(&wol->sopass, 0, sizeof(wol->sopass));
6372 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6374 struct bnx2 *bp = netdev_priv(dev);
6376 if (wol->wolopts & ~WAKE_MAGIC)
6379 if (wol->wolopts & WAKE_MAGIC) {
6380 if (bp->flags & BNX2_FLAG_NO_WOL)
6392 bnx2_nway_reset(struct net_device *dev)
6394 struct bnx2 *bp = netdev_priv(dev);
6397 if (!(bp->autoneg & AUTONEG_SPEED)) {
6401 spin_lock_bh(&bp->phy_lock);
6403 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6406 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6407 spin_unlock_bh(&bp->phy_lock);
6411 /* Force a link-down event that is visible to the other side */
6412 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6413 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6414 spin_unlock_bh(&bp->phy_lock);
6418 spin_lock_bh(&bp->phy_lock);
6420 bp->current_interval = SERDES_AN_TIMEOUT;
6421 bp->serdes_an_pending = 1;
6422 mod_timer(&bp->timer, jiffies + bp->current_interval);
6425 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6426 bmcr &= ~BMCR_LOOPBACK;
6427 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6429 spin_unlock_bh(&bp->phy_lock);
6435 bnx2_get_eeprom_len(struct net_device *dev)
6437 struct bnx2 *bp = netdev_priv(dev);
6439 if (bp->flash_info == NULL)
6442 return (int) bp->flash_size;
6446 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6449 struct bnx2 *bp = netdev_priv(dev);
6452 /* parameters already validated in ethtool_get_eeprom */
6454 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6460 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6463 struct bnx2 *bp = netdev_priv(dev);
6466 /* parameters already validated in ethtool_set_eeprom */
6468 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6474 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6476 struct bnx2 *bp = netdev_priv(dev);
6478 memset(coal, 0, sizeof(struct ethtool_coalesce));
6480 coal->rx_coalesce_usecs = bp->rx_ticks;
6481 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6482 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6483 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6485 coal->tx_coalesce_usecs = bp->tx_ticks;
6486 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6487 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6488 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6490 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6496 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6498 struct bnx2 *bp = netdev_priv(dev);
6500 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6501 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6503 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6504 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6506 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6507 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6509 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6510 if (bp->rx_quick_cons_trip_int > 0xff)
6511 bp->rx_quick_cons_trip_int = 0xff;
6513 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6514 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6516 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6517 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6519 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6520 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6522 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6523 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6526 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6527 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6528 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6529 bp->stats_ticks = USEC_PER_SEC;
6531 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6532 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6533 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
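/* The clamps above track the host-coalescing register field widths: tick
 * values saturate at 0x3ff and frame-count trip points at 0xff.  On the
 * 5708 the statistics block is snapped to either off or a fixed
 * one-second interval, apparently tied to the corrupted-counter
 * workaround in bnx2_timer(). */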
6535 if (netif_running(bp->dev)) {
6536 bnx2_netif_stop(bp);
6537 bnx2_init_nic(bp, 0);
6538 bnx2_netif_start(bp);
6545 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6547 struct bnx2 *bp = netdev_priv(dev);
6549 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6550 ering->rx_mini_max_pending = 0;
6551 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6553 ering->rx_pending = bp->rx_ring_size;
6554 ering->rx_mini_pending = 0;
6555 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6557 ering->tx_max_pending = MAX_TX_DESC_CNT;
6558 ering->tx_pending = bp->tx_ring_size;
6562 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6564 if (netif_running(bp->dev)) {
6565 bnx2_netif_stop(bp);
6566 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6571 bnx2_set_rx_ring_size(bp, rx);
6572 bp->tx_ring_size = tx;
6574 if (netif_running(bp->dev)) {
6577 rc = bnx2_alloc_mem(bp);
6580 bnx2_init_nic(bp, 0);
6581 bnx2_netif_start(bp);
6587 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6589 struct bnx2 *bp = netdev_priv(dev);
6592 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6593 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6594 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6598 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6603 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6605 struct bnx2 *bp = netdev_priv(dev);
6607 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6608 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6609 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6613 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6615 struct bnx2 *bp = netdev_priv(dev);
6617 bp->req_flow_ctrl = 0;
6618 if (epause->rx_pause)
6619 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6620 if (epause->tx_pause)
6621 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6623 if (epause->autoneg) {
6624 bp->autoneg |= AUTONEG_FLOW_CTRL;
6627 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6630 spin_lock_bh(&bp->phy_lock);
6632 bnx2_setup_phy(bp, bp->phy_port);
6634 spin_unlock_bh(&bp->phy_lock);
6640 bnx2_get_rx_csum(struct net_device *dev)
6642 struct bnx2 *bp = netdev_priv(dev);
6648 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6650 struct bnx2 *bp = netdev_priv(dev);
6657 bnx2_set_tso(struct net_device *dev, u32 data)
6659 struct bnx2 *bp = netdev_priv(dev);
6662 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6663 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6664 dev->features |= NETIF_F_TSO6;
6666 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6671 #define BNX2_NUM_STATS 46
6674 char string[ETH_GSTRING_LEN];
6675 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6677 { "rx_error_bytes" },
6679 { "tx_error_bytes" },
6680 { "rx_ucast_packets" },
6681 { "rx_mcast_packets" },
6682 { "rx_bcast_packets" },
6683 { "tx_ucast_packets" },
6684 { "tx_mcast_packets" },
6685 { "tx_bcast_packets" },
6686 { "tx_mac_errors" },
6687 { "tx_carrier_errors" },
6688 { "rx_crc_errors" },
6689 { "rx_align_errors" },
6690 { "tx_single_collisions" },
6691 { "tx_multi_collisions" },
6693 { "tx_excess_collisions" },
6694 { "tx_late_collisions" },
6695 { "tx_total_collisions" },
6698 { "rx_undersize_packets" },
6699 { "rx_oversize_packets" },
6700 { "rx_64_byte_packets" },
6701 { "rx_65_to_127_byte_packets" },
6702 { "rx_128_to_255_byte_packets" },
6703 { "rx_256_to_511_byte_packets" },
6704 { "rx_512_to_1023_byte_packets" },
6705 { "rx_1024_to_1522_byte_packets" },
6706 { "rx_1523_to_9022_byte_packets" },
6707 { "tx_64_byte_packets" },
6708 { "tx_65_to_127_byte_packets" },
6709 { "tx_128_to_255_byte_packets" },
6710 { "tx_256_to_511_byte_packets" },
6711 { "tx_512_to_1023_byte_packets" },
6712 { "tx_1024_to_1522_byte_packets" },
6713 { "tx_1523_to_9022_byte_packets" },
6714 { "rx_xon_frames" },
6715 { "rx_xoff_frames" },
6716 { "tx_xon_frames" },
6717 { "tx_xoff_frames" },
6718 { "rx_mac_ctrl_frames" },
6719 { "rx_filtered_packets" },
6721 { "rx_fw_discards" },
6724 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6726 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6727 STATS_OFFSET32(stat_IfHCInOctets_hi),
6728 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6729 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6730 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6731 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6732 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6733 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6734 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6735 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6736 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6737 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6738 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6739 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6740 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6741 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6742 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6743 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6744 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6745 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6746 STATS_OFFSET32(stat_EtherStatsCollisions),
6747 STATS_OFFSET32(stat_EtherStatsFragments),
6748 STATS_OFFSET32(stat_EtherStatsJabbers),
6749 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6750 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6751 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6752 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6753 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6754 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6755 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6756 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6757 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6758 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6759 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6760 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6761 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6762 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6763 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6764 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6765 STATS_OFFSET32(stat_XonPauseFramesReceived),
6766 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6767 STATS_OFFSET32(stat_OutXonSent),
6768 STATS_OFFSET32(stat_OutXoffSent),
6769 STATS_OFFSET32(stat_MacControlFramesReceived),
6770 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6771 STATS_OFFSET32(stat_IfInMBUFDiscards),
6772 STATS_OFFSET32(stat_FwRxDrop),
6775 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6776 * skipped because of errata.
6778 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6779 8,0,8,8,8,8,8,8,8,8,
6780 4,0,4,4,4,4,4,4,4,4,
6781 4,4,4,4,4,4,4,4,4,4,
6782 4,4,4,4,4,4,4,4,4,4,
6786 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6787 8,0,8,8,8,8,8,8,8,8,
6788 4,4,4,4,4,4,4,4,4,4,
6789 4,4,4,4,4,4,4,4,4,4,
6790 4,4,4,4,4,4,4,4,4,4,
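/* Each entry in these length arrays describes the counter at the same
 * index of bnx2_stats_offset_arr[]: 8 marks a hi/lo 64-bit pair, 4 a
 * plain 32-bit counter, and 0 a counter that bnx2_get_ethtool_stats()
 * must skip on that chip per the errata noted above. */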
6794 #define BNX2_NUM_TESTS 6
6797 char string[ETH_GSTRING_LEN];
6798 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6799 { "register_test (offline)" },
6800 { "memory_test (offline)" },
6801 { "loopback_test (offline)" },
6802 { "nvram_test (online)" },
6803 { "interrupt_test (online)" },
6804 { "link_test (online)" },
6808 bnx2_get_sset_count(struct net_device *dev, int sset)
6812 return BNX2_NUM_TESTS;
6814 return BNX2_NUM_STATS;
6821 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6823 struct bnx2 *bp = netdev_priv(dev);
6825 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6826 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6829 bnx2_netif_stop(bp);
6830 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6833 if (bnx2_test_registers(bp) != 0) {
6835 etest->flags |= ETH_TEST_FL_FAILED;
6837 if (bnx2_test_memory(bp) != 0) {
6839 etest->flags |= ETH_TEST_FL_FAILED;
6841 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6842 etest->flags |= ETH_TEST_FL_FAILED;
6844 if (!netif_running(bp->dev)) {
6845 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6848 bnx2_init_nic(bp, 1);
6849 bnx2_netif_start(bp);
6852 /* wait for link up */
6853 for (i = 0; i < 7; i++) {
6856 msleep_interruptible(1000);
6860 if (bnx2_test_nvram(bp) != 0) {
6862 etest->flags |= ETH_TEST_FL_FAILED;
6864 if (bnx2_test_intr(bp) != 0) {
6866 etest->flags |= ETH_TEST_FL_FAILED;
6869 if (bnx2_test_link(bp) != 0) {
6871 etest->flags |= ETH_TEST_FL_FAILED;
6877 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6879 switch (stringset) {
6881 memcpy(buf, bnx2_stats_str_arr,
6882 sizeof(bnx2_stats_str_arr));
6885 memcpy(buf, bnx2_tests_str_arr,
6886 sizeof(bnx2_tests_str_arr));
6892 bnx2_get_ethtool_stats(struct net_device *dev,
6893 struct ethtool_stats *stats, u64 *buf)
6895 struct bnx2 *bp = netdev_priv(dev);
6897 u32 *hw_stats = (u32 *) bp->stats_blk;
6898 u8 *stats_len_arr = NULL;
6900 if (hw_stats == NULL) {
6901 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6905 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6906 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6907 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6908 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6909 stats_len_arr = bnx2_5706_stats_len_arr;
6911 stats_len_arr = bnx2_5708_stats_len_arr;
6913 for (i = 0; i < BNX2_NUM_STATS; i++) {
6914 if (stats_len_arr[i] == 0) {
6915 /* skip this counter */
6919 if (stats_len_arr[i] == 4) {
6920 /* 4-byte counter */
6922 *(hw_stats + bnx2_stats_offset_arr[i]);
6925 /* 8-byte counter */
6926 buf[i] = (((u64) *(hw_stats +
6927 bnx2_stats_offset_arr[i])) << 32) +
6928 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6933 bnx2_phys_id(struct net_device *dev, u32 data)
6935 struct bnx2 *bp = netdev_priv(dev);
6942 save = REG_RD(bp, BNX2_MISC_CFG);
6943 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6945 for (i = 0; i < (data * 2); i++) {
6947 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6950 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6951 BNX2_EMAC_LED_1000MB_OVERRIDE |
6952 BNX2_EMAC_LED_100MB_OVERRIDE |
6953 BNX2_EMAC_LED_10MB_OVERRIDE |
6954 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6955 BNX2_EMAC_LED_TRAFFIC);
6957 msleep_interruptible(500);
6958 if (signal_pending(current))
6961 REG_WR(bp, BNX2_EMAC_LED, 0);
6962 REG_WR(bp, BNX2_MISC_CFG, save);
6967 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6969 struct bnx2 *bp = netdev_priv(dev);
6971 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6972 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6974 return (ethtool_op_set_tx_csum(dev, data));
6977 static const struct ethtool_ops bnx2_ethtool_ops = {
6978 .get_settings = bnx2_get_settings,
6979 .set_settings = bnx2_set_settings,
6980 .get_drvinfo = bnx2_get_drvinfo,
6981 .get_regs_len = bnx2_get_regs_len,
6982 .get_regs = bnx2_get_regs,
6983 .get_wol = bnx2_get_wol,
6984 .set_wol = bnx2_set_wol,
6985 .nway_reset = bnx2_nway_reset,
6986 .get_link = ethtool_op_get_link,
6987 .get_eeprom_len = bnx2_get_eeprom_len,
6988 .get_eeprom = bnx2_get_eeprom,
6989 .set_eeprom = bnx2_set_eeprom,
6990 .get_coalesce = bnx2_get_coalesce,
6991 .set_coalesce = bnx2_set_coalesce,
6992 .get_ringparam = bnx2_get_ringparam,
6993 .set_ringparam = bnx2_set_ringparam,
6994 .get_pauseparam = bnx2_get_pauseparam,
6995 .set_pauseparam = bnx2_set_pauseparam,
6996 .get_rx_csum = bnx2_get_rx_csum,
6997 .set_rx_csum = bnx2_set_rx_csum,
6998 .set_tx_csum = bnx2_set_tx_csum,
6999 .set_sg = ethtool_op_set_sg,
7000 .set_tso = bnx2_set_tso,
7001 .self_test = bnx2_self_test,
7002 .get_strings = bnx2_get_strings,
7003 .phys_id = bnx2_phys_id,
7004 .get_ethtool_stats = bnx2_get_ethtool_stats,
7005 .get_sset_count = bnx2_get_sset_count,
7008 /* Called with rtnl_lock */
7010 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7012 struct mii_ioctl_data *data = if_mii(ifr);
7013 struct bnx2 *bp = netdev_priv(dev);
7018 data->phy_id = bp->phy_addr;
7024 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7027 if (!netif_running(dev))
7030 spin_lock_bh(&bp->phy_lock);
7031 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7032 spin_unlock_bh(&bp->phy_lock);
7034 data->val_out = mii_regval;
7040 if (!capable(CAP_NET_ADMIN))
7043 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7046 if (!netif_running(dev))
7049 spin_lock_bh(&bp->phy_lock);
7050 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7051 spin_unlock_bh(&bp->phy_lock);
7062 /* Called with rtnl_lock */
7064 bnx2_change_mac_addr(struct net_device *dev, void *p)
7066 struct sockaddr *addr = p;
7067 struct bnx2 *bp = netdev_priv(dev);
7069 if (!is_valid_ether_addr(addr->sa_data))
7072 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7073 if (netif_running(dev))
7074 bnx2_set_mac_addr(bp);
7079 /* Called with rtnl_lock */
7081 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7083 struct bnx2 *bp = netdev_priv(dev);
7085 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7086 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7090 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7093 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7095 poll_bnx2(struct net_device *dev)
7097 struct bnx2 *bp = netdev_priv(dev);
7099 disable_irq(bp->pdev->irq);
7100 bnx2_interrupt(bp->pdev->irq, dev);
7101 enable_irq(bp->pdev->irq);
7105 static void __devinit
7106 bnx2_get_5709_media(struct bnx2 *bp)
7108 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7109 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7112 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7114 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7115 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7119 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7120 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7122 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7124 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7129 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7137 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
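/* Media detection on the 5709 is two-staged: the bond_id field pins down
 * single-media parts directly (the _C bond is copper-only, _S is
 * serdes-only), and otherwise the strap value - taken from the override
 * bits when STRAP_OVERRIDE is set, else from the hardware straps - is
 * matched against per-PCI-function lists of serdes straps (the case
 * labels are elided here) to decide whether to set
 * BNX2_PHY_FLAG_SERDES. */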
7143 static void __devinit
7144 bnx2_get_pci_speed(struct bnx2 *bp)
7148 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7149 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7152 bp->flags |= BNX2_FLAG_PCIX;
7154 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7156 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7158 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7159 bp->bus_speed_mhz = 133;
7162 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7163 bp->bus_speed_mhz = 100;
7166 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7167 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7168 bp->bus_speed_mhz = 66;
7171 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7172 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7173 bp->bus_speed_mhz = 50;
7176 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7177 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7178 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7179 bp->bus_speed_mhz = 33;
7184 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7185 bp->bus_speed_mhz = 66;
7187 bp->bus_speed_mhz = 33;
7190 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7191 bp->flags |= BNX2_FLAG_PCI_32BIT;
7195 static int __devinit
7196 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7199 unsigned long mem_len;
7202 u64 dma_mask, persist_dma_mask;
7204 SET_NETDEV_DEV(dev, &pdev->dev);
7205 bp = netdev_priv(dev);
7210 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7211 rc = pci_enable_device(pdev);
7213 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7217 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7219 "Cannot find PCI device base address, aborting.\n");
7221 goto err_out_disable;
7224 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7226 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7227 goto err_out_disable;
7230 pci_set_master(pdev);
7231 pci_save_state(pdev);
7233 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7234 if (bp->pm_cap == 0) {
7236 "Cannot find power management capability, aborting.\n");
7238 goto err_out_release;
7244 spin_lock_init(&bp->phy_lock);
7245 spin_lock_init(&bp->indirect_lock);
7246 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7248 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7249 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7250 dev->mem_end = dev->mem_start + mem_len;
7251 dev->irq = pdev->irq;
7253 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7256 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7258 goto err_out_release;
7261 /* Configure byte swap and enable write to the reg_window registers.
7262 * Rely on the CPU to do target byte swapping on big-endian systems;
7263 * the chip's target-access swapping will not swap all accesses.
7265 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7266 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7267 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7269 bnx2_set_power_state(bp, PCI_D0);
7271 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7274 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7276 "Cannot find PCIE capability, aborting.\n");
7280 bp->flags |= BNX2_FLAG_PCIE;
7281 if (CHIP_REV(bp) == CHIP_REV_Ax)
7282 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7284 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7285 if (bp->pcix_cap == 0) {
7287 "Cannot find PCIX capability, aborting.\n");
7293 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7294 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7295 bp->flags |= BNX2_FLAG_MSIX_CAP;
7298 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7299 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7300 bp->flags |= BNX2_FLAG_MSI_CAP;
7303 /* 5708 cannot support DMA addresses > 40-bit. */
7304 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7305 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7307 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7309 /* Configure DMA attributes. */
7310 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7311 dev->features |= NETIF_F_HIGHDMA;
7312 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7315 "pci_set_consistent_dma_mask failed, aborting.\n");
7318 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7319 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7323 if (!(bp->flags & BNX2_FLAG_PCIE))
7324 bnx2_get_pci_speed(bp);
7326 /* 5706A0 may falsely detect SERR and PERR. */
7327 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7328 reg = REG_RD(bp, PCI_COMMAND);
7329 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7330 REG_WR(bp, PCI_COMMAND, reg);
7332 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7333 !(bp->flags & BNX2_FLAG_PCIX)) {
7336 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7340 bnx2_init_nvram(bp);
7342 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7344 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7345 BNX2_SHM_HDR_SIGNATURE_SIG) {
7346 u32 off = PCI_FUNC(pdev->devfn) << 2;
7348 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7350 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7352 /* Get the permanent MAC address. First we need to make sure the
7353 * firmware is actually running.
7355 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7357 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7358 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7359 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7364 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7365 for (i = 0, j = 0; i < 3; i++) {
7368 num = (u8) (reg >> (24 - (i * 8)));
7369 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7370 if (num >= k || !skip0 || k == 1) {
7371 bp->fw_version[j++] = (num / k) + '0';
7376 bp->fw_version[j++] = '.';
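/* The loop above renders the packed BC_REV word as a dotted version
 * string: each of its top three bytes is printed in decimal with leading
 * zeros suppressed (the k = 100, 10, 1 divisor walk) and the bytes are
 * joined with '.'.  An illustrative reg value of 0x01020300 would yield
 * "1.2.3" in bp->fw_version. */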
7378 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7379 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7382 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7383 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7385 for (i = 0; i < 30; i++) {
7386 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7387 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7392 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7393 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7394 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7395 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7397 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7399 bp->fw_version[j++] = ' ';
7400 for (i = 0; i < 3; i++) {
7401 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7403 memcpy(&bp->fw_version[j], &reg, 4);
7408 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7409 bp->mac_addr[0] = (u8) (reg >> 8);
7410 bp->mac_addr[1] = (u8) reg;
7412 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7413 bp->mac_addr[2] = (u8) (reg >> 24);
7414 bp->mac_addr[3] = (u8) (reg >> 16);
7415 bp->mac_addr[4] = (u8) (reg >> 8);
7416 bp->mac_addr[5] = (u8) reg;
7418 bp->tx_ring_size = MAX_TX_DESC_CNT;
7419 bnx2_set_rx_ring_size(bp, 255);
7423 bp->tx_quick_cons_trip_int = 20;
7424 bp->tx_quick_cons_trip = 20;
7425 bp->tx_ticks_int = 80;
7428 bp->rx_quick_cons_trip_int = 6;
7429 bp->rx_quick_cons_trip = 6;
7430 bp->rx_ticks_int = 18;
7433 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7435 bp->timer_interval = HZ;
7436 bp->current_interval = HZ;
7440 /* Disable WOL support if we are running on a SERDES chip. */
7441 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7442 bnx2_get_5709_media(bp);
7443 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7444 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
		bnx2_init_remote_phy(bp);
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}
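
	/* 5706 A0 workaround: use the same coalescing parameters for the
	 * in-interrupt (*_int) case as for the normal case.
	 */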
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI
	 * writes with byte enables disabled on the unused 32-bit word.
	 * This is legal but causes problems on the AMD 8132 which will
	 * eventually stop responding.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
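
/* Format a human-readable bus description into @str, e.g. "PCI Express"
 * or "PCI-X 64-bit 133MHz", and return @str.
 */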
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
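
/* Attach one NAPI context per possible MSI-X vector.  Vector 0 uses the
 * default poll handler; the remaining vectors poll via bnx2_poll_msix().
 */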
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}
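
/**
 * bnx2_init_one - PCI probe entry point
 * @pdev: Pointer to PCI device
 * @ent: Matching entry in bnx2_pci_tbl
 *
 * Allocates the net_device, initializes the board via bnx2_init_board(),
 * wires up the netdev and ethtool callbacks, and registers the interface.
 */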
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev is zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;
	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}
	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
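
/**
 * bnx2_remove_one - PCI remove entry point
 * @pdev: Pointer to PCI device
 *
 * Unregisters the net_device and releases the resources acquired in
 * bnx2_init_one().
 */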
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
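
/**
 * bnx2_suspend - put the device into a system sleep state
 * @pdev: Pointer to PCI device
 * @state: Target power state selected by the PM core
 *
 * Stops traffic and asks the bootcode for the reset flavor matching the
 * configured Wake-on-LAN policy before powering the chip down.
 */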
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
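
/**
 * bnx2_resume - restore the device after a system sleep state
 * @pdev: Pointer to PCI device
 *
 * Restores PCI config state and, if the interface was running at suspend
 * time, brings the chip back to D0 and reinitializes the NIC.
 */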
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
module_init(bnx2_init);
module_exit(bnx2_cleanup);