/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.8"
#define DRV_MODULE_RELDATE	"April 24, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
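
/* Example: with TX_DESC_CNT = 256 and MAX_TX_DESC_CNT = 255, a full ring
 * has tx_prod - tx_cons == 256.  The 16-bit producer/consumer indices wrap
 * independently of the ring size, so after a wrap the raw difference can
 * exceed TX_DESC_CNT; masking with 0xffff and clamping 256 down to 255
 * recovers the true number of BDs in flight, so the function never reports
 * the skipped index as a free slot.
 */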
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
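
/* The two helpers above implement a classic address/data window: the target
 * offset is first written to the WINDOW_ADDRESS register, and the data is
 * then read from (or written through) the WINDOW register.  The spinlock
 * serializes the two-step sequence so concurrent callers cannot interleave
 * their address and data phases.
 */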
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
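
/* The MDIO_COMM word assembled above packs the PHY address into bits 21+
 * and the register number into bits 16-20, with the data for a write (or
 * the result of a completed read) in the low 16 bits.  START_BUSY both
 * kicks off the transaction and serves as the busy flag that the
 * 50-iteration loop polls until the management interface is done.
 */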
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
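
/* bp->intr_sem acts as a disable count: bnx2_disable_int_sync() increments
 * it before masking the interrupt, and the ISRs bail out while it is
 * non-zero.  bnx2_netif_start() only re-enables the queue, polling and
 * interrupts when its decrement brings the count back to zero, so nested
 * stop/start pairs compose correctly.
 */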
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
	       bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;
	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
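
/* Layout of the combined allocation: L1_CACHE_ALIGN() rounds the status
 * block size up to a cache-line boundary (e.g. to a multiple of 64 bytes on
 * a 64-byte-line CPU), so the statistics block starts on its own line.  One
 * DMA mapping then serves both blocks, with stats_blk_mapping simply offset
 * by status_blk_size from status_blk_mapping.
 */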
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
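
/* Worked example of the 802.3 pause resolution above: if we advertise
 * PAUSE_CAP | PAUSE_ASYM and the partner advertises only PAUSE_ASYM, the
 * first branch selects FLOW_CTRL_RX (we honor PAUSE frames from the partner
 * but do not send any).  If both sides advertise PAUSE_CAP, flow control is
 * enabled in both directions.
 */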
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
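
/* The ">> 2" above lines up the 1000BASE-T bits: the local abilities live
 * in bits 8-9 of MII_CTRL1000 (ADVERTISE_1000HALF/FULL) while the partner's
 * abilities are reported two bits higher, in bits 10-11 of MII_STAT1000
 * (LPA_1000HALF/FULL).  Shifting the status word right by two lets both be
 * masked with the same ADVERTISE_* values.
 */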
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					       BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			       BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				       BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
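
/* The handshake above works as follows: the driver writes a message carrying
 * an incrementing sequence number into the DRV_MB mailbox, then polls FW_MB
 * until the firmware echoes the same sequence number in the ACK field.  A
 * WAIT0 message returns as soon as the poll loop exits, and on timeout a
 * FW_TIMEOUT code is posted so the firmware knows the driver gave up.
 */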
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
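
/* The "(val << 9) | val | 1" write above apparently packs the mbuf number
 * into two adjacent 9-bit fields of the free command (presumably the head
 * and tail of a one-buffer chain) and sets the low valid bit.  Each good
 * mbuf is returned to the pool individually, so the bad ones are simply
 * never freed and stay out of circulation.
 */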
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
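
/* Example: for a MAC address of 00:10:18:xx:yy:zz the two writes above are
 * MAC_MATCH0 = 0x00000010 (bytes 0-1 in the low 16 bits) and
 * MAC_MATCH1 = 0x18xxyyzz (bytes 2-5, most significant byte first).
 */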
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
static int
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
{
	struct status_block *sblk = bp->status_blk;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
}
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
		  u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
							 new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
		    (ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
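
/* The two mailbox writes above tell the chip how far the driver has
 * refilled the ring: BNX2_L2CTX_HOST_BDIDX publishes the new producer index
 * and BNX2_L2CTX_HOST_BSEQ the running byte count (rx_prod_bseq advances by
 * rx_buf_use_size per posted buffer), which likely serves as the buffer
 * credit the hardware consumes as it places packets.
 */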
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
#define STATUS_ATTN_EVENTS	STATUS_ATTN_BITS_LINK_STATE

static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
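
/* Worked example of the multicast hash above: ether_crc_le() of the address
 * yields a 32-bit CRC, and its low 8 bits select one of 256 hash bits.
 * Bits 7-5 (regidx) pick one of the eight 32-bit MULTICAST_HASH registers
 * and bits 4-0 pick the bit within it.  E.g. a CRC whose low byte is 0x4a
 * sets bit 10 (0x4a & 0x1f) of hash register 2 ((0x4a & 0xe0) >> 5).
 */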
#define FW_BUF_SIZE	0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
		goto gunzip_nomem1;

	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	vfree(bp->gunzip_buf);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
			    "decompression.\n", bp->dev->name);
	return -ENOMEM;
}

static void
bnx2_gunzip_end(struct bnx2 *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		vfree(bp->gunzip_buf);
		bp->gunzip_buf = NULL;
	}
}
2494 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2498 /* check gzip header */
2499 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2505 if (zbuf[3] & FNAME)
2506 while ((zbuf[n++] != 0) && (n < len));
2508 bp->strm->next_in = zbuf + n;
2509 bp->strm->avail_in = len - n;
2510 bp->strm->next_out = bp->gunzip_buf;
2511 bp->strm->avail_out = FW_BUF_SIZE;
2513 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2517 rc = zlib_inflate(bp->strm, Z_FINISH);
2519 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2520 *outbuf = bp->gunzip_buf;
2522 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2523 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2524 bp->dev->name, bp->strm->msg);
2526 zlib_inflateEnd(bp->strm);
2528 if (rc == Z_STREAM_END)
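
/*
 * Note on bnx2_gunzip(): the firmware images are stored as complete gzip
 * files, so the 10-byte gzip header (magic 0x1f 0x8b, method Z_DEFLATED)
 * and the optional original-file-name field are skipped by hand, and zlib
 * is initialized with a negative window size (-MAX_WBITS) so it consumes
 * the raw deflate stream without looking for a zlib wrapper.
 */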
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;

	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
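
/*
 * Each RV2P instruction is 64 bits wide: the high and low halves are
 * staged in BNX2_RV2P_INSTR_HIGH/LOW and committed to instruction slot
 * i/8 by the write to the processor's ADDR_CMD register, which is why
 * the loop above advances in 8-byte steps.
 */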
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		u32 text_len;
		void *text;

		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
				 &text_len);
		if (rc)
			return rc;

		fw->text = text;
	}
	if (fw->gz_text) {
		int j;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
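
/*
 * Summary of load_cpu_fw(): each on-chip RISC processor is halted, its
 * firmware sections (text/data/sbss/bss/rodata) are written into the
 * processor's scratchpad through the indirect register interface, the
 * program counter is pointed at the entry address, and the halt bit is
 * cleared to start execution.
 */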
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			/* 5706 A0/A1 steppings do not reliably come back
			 * from D3hot, so leave the power state at D0. */
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int j;

		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
	/* Disable both bits, even after read. */
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
	       val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
		       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);

			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
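
/*
 * Worked example of the buffered-flash address translation above
 * (illustrative only, assuming a part with page_size = 264 and
 * page_bits = 9 as in flash_table): linear offset 1000 maps to
 * ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744, i.e.
 * the page number in the high bits and the byte offset within the
 * page in the low bits.
 */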
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
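
/*
 * The FIRST/LAST command flags in bnx2_nvram_read() bracket one flash
 * access sequence: FIRST is set on the first dword of a burst, LAST on
 * the final one, and dwords in the middle are issued with no flags.
 * The unaligned head and tail of the request are read through a 4-byte
 * bounce buffer so that only whole dwords ever reach the NVRAM interface.
 */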
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
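
/*
 * bnx2_nvram_write() is a page-wise read-modify-write: for non-buffered
 * flash each affected page is first read into flash_buffer (264 bytes,
 * the largest page size handled here), erased, and then rewritten with
 * the old bytes outside [data_start, data_end) and the new bytes inside
 * it.  Buffered flash parts skip the erase and only new data is written.
 */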
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Reset takes approximately 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regulator two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
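
/*
 * Reset sequencing note: DMA and coalescing are quiesced first, the
 * firmware is consulted via bnx2_fw_sync() both before and after the
 * reset, and the BNX2_PCI_SWAP_DIAG0 readback (expected 0x01020304)
 * verifies that the post-reset byte-swap configuration is sane before
 * the driver touches any other register.
 */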
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_5709_context(bp);
	else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 num_rings, max;

	bp->rx_ring_size = size;
	num_rings = 1;
	while (size > MAX_RX_DESC_CNT) {
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = MAX_RX_RINGS;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	bp->rx_max_ring = max;
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
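
/*
 * Worked example for bnx2_set_rx_ring_size() (illustrative only,
 * assuming MAX_RX_DESC_CNT = 255 and MAX_RX_RINGS >= 4): with
 * size = 600, the while loop yields num_rings = 3, which the
 * power-of-two rounding then raises to max = 4, so four RX BD pages
 * are chained together.
 */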
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}
}

static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < bp->rx_max_ring_idx; i++) {
		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;

		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

		rx_buf->skb = NULL;

		dev_kfree_skb(skb);
	}
}

static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_tx_ring(bp);
	bnx2_init_rx_ring(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	bnx2_set_link(bp);
	return 0;
}
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			REG_WR_IND(bp, start + offset, test_pattern[i]);

			if (REG_RD_IND(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
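
/*
 * The loopback test above is self-contained: it injects one hand-built
 * frame directly into the TX ring, kicks the coalescing block with
 * COAL_NOW_WO_INT so the status block is updated without raising an
 * interrupt, and then verifies the frame came back intact by checking
 * the l2_fhdr error bits, the length, and the byte pattern written into
 * the payload.
 */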
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	u32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
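
/*
 * CRC32_RESIDUAL check: each 0x100-byte NVRAM block ends with the
 * complement of the CRC over the preceding bytes, so running the CRC
 * over the whole block (data plus stored checksum) must produce the
 * constant residue 0xdebb20e3 if the block is intact.
 */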
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}

static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	vlan_group_set_device(bp->vlgrp, vid, NULL);
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size) &&
	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
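
/*
 * Example (illustrative only): the hardware keeps counters such as
 * IfHCInOctets as 64-bit values split into _hi/_lo words.  On a 64-bit
 * kernel GET_NET_STATS() folds both words into one unsigned long; on a
 * 32-bit kernel only the low word is reported, so those counters wrap
 * at 4 GB there.
 */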
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

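/* Example userspace usage of the above (assuming an interface named eth0):
 *
 *	ethtool -s eth0 autoneg on
 *		advertise everything the PHY supports
 *	ethtool -s eth0 speed 100 duplex full autoneg on
 *		autonegotiate, but advertise a single copper speed
 *	ethtool -s eth0 speed 1000 duplex full autoneg off
 *		forced speed; per the checks above, SerDes ports accept
 *		only 1000 (or 2500 if capable) full duplex
 */
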
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}

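/* The bootcode version in bp->fw_ver is packed one component per byte,
 * so the three high bytes are rendered as the "x.y.z" string above.
 * Each component is emitted as a single ASCII digit, which assumes
 * bootcode version components never exceed 9.
 */
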
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

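/* reg_boundaries[] is a list of (start, end) offset pairs: the dump
 * reads each [start, end) range with REG_RD() and skips the hole in
 * between.  Since the buffer is zeroed up front, offsets that fall in
 * a hole simply read back as 0 in the register dump.
 */
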
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

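/* On SerDes, briefly writing BMCR_LOOPBACK above forces the link down
 * so that the link partner actually sees the renegotiation; the driver
 * timer, re-armed with SERDES_AN_TIMEOUT and serdes_an_pending set,
 * gives autonegotiation time to complete before intervening.
 */
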
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff)
		bp->tx_quick_cons_trip_int = 0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}

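/* Example userspace usage (assuming an interface named eth0):
 *
 *	ethtool -C eth0 rx-usecs 18 rx-frames 6 tx-usecs 80 tx-frames 20
 *
 * The clamping above reflects the hardware limits: tick values are
 * 10-bit (max 0x3ff usec), frame counts are 8-bit (max 0xff), and the
 * statistics block interval is masked to a multiple of 256 usec, no
 * larger than 0xffff00.
 */
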
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}

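/* Example userspace usage (assuming an interface named eth0):
 *
 *	ethtool -G eth0 rx 255 tx 255
 *
 * tx must be larger than MAX_SKB_FRAGS so that a maximally-fragmented
 * skb can always be queued, and resizing restarts the NIC because the
 * descriptor rings have to be freed and reallocated.
 */
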
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}

#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred_frames" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

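/* The statistics block is read as an array of u32, so STATS_OFFSET32
 * converts a byte offset within struct statistics_block into a word
 * index.  64-bit counters occupy an adjacent _hi/_lo word pair; the
 * offsets below therefore name the _hi word and the reader adds 1 to
 * reach the low word.
 */
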
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* stat_IfHCInBadOctets is skipped because of errata. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

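/* Each entry in the length arrays above gives the width in bytes of
 * the corresponding counter in bnx2_stats_offset_arr: 8 means a 64-bit
 * _hi/_lo pair, 4 a single 32-bit word, and 0 marks a counter that is
 * skipped (reported as 0) because of the chip errata noted above.
 */
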
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}

static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}

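/* Example userspace usage (assuming an interface named eth0):
 *
 *	ethtool -p eth0 5
 *
 * blinks the port LED for roughly 5 seconds (two half-second toggles
 * per requested second); a length of 0 defaults to 2 seconds above,
 * and a pending signal ends the blinking early.
 */
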
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_hw_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u8 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) != CHIP_NUM_5709) {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

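	/* Note: both the streaming and the coherent DMA masks are held to
	 * 40 bits on the 5708 because of the hardware limit above;
	 * NETIF_F_HIGHDMA is advertised only when the wider mask is
	 * accepted, otherwise the driver falls back to 32-bit DMA.
	 */
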
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
	else
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);