1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
15 #define DRV_MODULE_NAME "bnx2"
16 #define PFX DRV_MODULE_NAME ": "
17 #define DRV_MODULE_VERSION "1.4.38"
18 #define DRV_MODULE_RELDATE "February 10, 2006"
20 #define RUN_AT(x) (jiffies + (x))
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT (5*HZ)
25 static char version[] __devinitdata =
26 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
33 static int disable_msi = 0;
35 module_param(disable_msi, int, 0);
36 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
48 /* indexed by board_t, above */
51 } board_info[] __devinitdata = {
52 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
53 { "HP NC370T Multifunction Gigabit Server Adapter" },
54 { "HP NC370i Multifunction Gigabit Server Adapter" },
55 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
56 { "HP NC370F Multifunction Gigabit Server Adapter" },
57 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
58 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
61 static struct pci_device_id bnx2_pci_tbl[] = {
62 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
63 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
64 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
65 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
66 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
67 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
68 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
69 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
70 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
71 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
72 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
73 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
74 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
75 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
79 static struct flash_spec flash_table[] =
82 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
83 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
84 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
86 /* Expansion entry 0001 */
87 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
88 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
89 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
91 /* Saifun SA25F010 (non-buffered flash) */
92 /* strap, cfg1, & write1 need updates */
93 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
94 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
95 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
96 "Non-buffered flash (128kB)"},
97 /* Saifun SA25F020 (non-buffered flash) */
98 /* strap, cfg1, & write1 need updates */
99 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
100 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
101 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
102 "Non-buffered flash (256kB)"},
103 /* Expansion entry 0100 */
104 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
105 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
106 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
108 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
109 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
110 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
111 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
112 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
113 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
114 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
115 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
116 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
117 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
118 /* Saifun SA25F005 (non-buffered flash) */
119 /* strap, cfg1, & write1 need updates */
120 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
121 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
122 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
123 "Non-buffered flash (64kB)"},
125 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
126 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129 /* Expansion entry 1001 */
130 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
131 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134 /* Expansion entry 1010 */
135 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* ATMEL AT45DB011B (buffered flash) */
140 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
141 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
142 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
143 "Buffered flash (128kB)"},
144 /* Expansion entry 1100 */
145 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
146 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149 /* Expansion entry 1101 */
150 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
151 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
152 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
154 /* Atmel Expansion entry 1110 */
155 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
156 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
157 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
158 "Entry 1110 (Atmel)"},
159 /* ATMEL AT45DB021B (buffered flash) */
160 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
161 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
162 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
163 "Buffered flash (256kB)"},
166 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
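/* Free TX descriptor count.  The outstanding count is the producer
 * index minus the consumer index within a ring page; when the
 * unsigned subtraction wraps, mask it back into range and drop one
 * slot (presumably the per-page next-pointer BD, which never carries
 * data).
 */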
168 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
170 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
172 if (diff > MAX_TX_DESC_CNT)
173 diff = (diff & MAX_TX_DESC_CNT) - 1;
174 return (bp->tx_ring_size - diff);
178 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
180 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
181 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
185 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
187 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
188 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
192 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
195 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
196 REG_WR(bp, BNX2_CTX_DATA, val);
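/* MII management (MDIO) access.  If the chip is auto-polling the PHY,
 * auto-poll is switched off around the access so the manual command
 * does not collide with it.  The command is issued through
 * EMAC_MDIO_COMM with START_BUSY set and the bit is polled (up to 50
 * times) until the controller clears it; auto-poll is then restored.
 */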
200 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
205 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
206 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
207 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
209 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
210 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
215 val1 = (bp->phy_addr << 21) | (reg << 16) |
216 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
217 BNX2_EMAC_MDIO_COMM_START_BUSY;
218 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
220 for (i = 0; i < 50; i++) {
223 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
224 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
227 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
228 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
234 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
243 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
244 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
245 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
247 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
248 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
257 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
262 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
263 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
264 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
266 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
267 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
272 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
273 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
274 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
275 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
277 for (i = 0; i < 50; i++) {
280 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
281 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
287 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
292 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
293 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
294 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
296 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
297 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
306 bnx2_disable_int(struct bnx2 *bp)
308 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
309 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
310 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
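/* Re-enable interrupts: the first write acks events up to
 * last_status_idx with the mask bit still set, the second write
 * clears the mask, and COAL_NOW kicks the host coalescer so any
 * already-pending work raises an interrupt right away.
 */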
314 bnx2_enable_int(struct bnx2 *bp)
316 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
317 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
318 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
320 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
321 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
323 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
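/* intr_sem counts outstanding "interrupts disabled" requests.  The
 * ISRs below return early while it is non-zero, and bnx2_netif_start()
 * restarts the queue and polling only once the count drops back to
 * zero.
 */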
327 bnx2_disable_int_sync(struct bnx2 *bp)
329 atomic_inc(&bp->intr_sem);
330 bnx2_disable_int(bp);
331 synchronize_irq(bp->pdev->irq);
335 bnx2_netif_stop(struct bnx2 *bp)
337 bnx2_disable_int_sync(bp);
338 if (netif_running(bp->dev)) {
339 netif_poll_disable(bp->dev);
340 netif_tx_disable(bp->dev);
341 bp->dev->trans_start = jiffies; /* prevent tx timeout */
346 bnx2_netif_start(struct bnx2 *bp)
348 if (atomic_dec_and_test(&bp->intr_sem)) {
349 if (netif_running(bp->dev)) {
350 netif_wake_queue(bp->dev);
351 netif_poll_enable(bp->dev);
358 bnx2_free_mem(struct bnx2 *bp)
362 if (bp->status_blk) {
363 pci_free_consistent(bp->pdev, bp->status_stats_size,
364 bp->status_blk, bp->status_blk_mapping);
365 bp->status_blk = NULL;
366 bp->stats_blk = NULL;
368 if (bp->tx_desc_ring) {
369 pci_free_consistent(bp->pdev,
370 sizeof(struct tx_bd) * TX_DESC_CNT,
371 bp->tx_desc_ring, bp->tx_desc_mapping);
372 bp->tx_desc_ring = NULL;
374 kfree(bp->tx_buf_ring);
375 bp->tx_buf_ring = NULL;
376 for (i = 0; i < bp->rx_max_ring; i++) {
377 if (bp->rx_desc_ring[i])
378 pci_free_consistent(bp->pdev,
379 sizeof(struct rx_bd) * RX_DESC_CNT,
381 bp->rx_desc_mapping[i]);
382 bp->rx_desc_ring[i] = NULL;
384 vfree(bp->rx_buf_ring);
385 bp->rx_buf_ring = NULL;
389 bnx2_alloc_mem(struct bnx2 *bp)
391 int i, status_blk_size;
393 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
395 if (bp->tx_buf_ring == NULL)
398 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
399 sizeof(struct tx_bd) *
401 &bp->tx_desc_mapping);
402 if (bp->tx_desc_ring == NULL)
405 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
407 if (bp->rx_buf_ring == NULL)
410 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
413 for (i = 0; i < bp->rx_max_ring; i++) {
414 bp->rx_desc_ring[i] =
415 pci_alloc_consistent(bp->pdev,
416 sizeof(struct rx_bd) * RX_DESC_CNT,
417 &bp->rx_desc_mapping[i]);
418 if (bp->rx_desc_ring[i] == NULL)
423 /* Combine status and statistics blocks into one allocation. */
424 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
425 bp->status_stats_size = status_blk_size +
426 sizeof(struct statistics_block);
428 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
429 &bp->status_blk_mapping);
430 if (bp->status_blk == NULL)
433 memset(bp->status_blk, 0, bp->status_stats_size);
435 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
438 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
448 bnx2_report_fw_link(struct bnx2 *bp)
450 u32 fw_link_status = 0;
455 switch (bp->line_speed) {
457 if (bp->duplex == DUPLEX_HALF)
458 fw_link_status = BNX2_LINK_STATUS_10HALF;
460 fw_link_status = BNX2_LINK_STATUS_10FULL;
463 if (bp->duplex == DUPLEX_HALF)
464 fw_link_status = BNX2_LINK_STATUS_100HALF;
466 fw_link_status = BNX2_LINK_STATUS_100FULL;
469 if (bp->duplex == DUPLEX_HALF)
470 fw_link_status = BNX2_LINK_STATUS_1000HALF;
472 fw_link_status = BNX2_LINK_STATUS_1000FULL;
475 if (bp->duplex == DUPLEX_HALF)
476 fw_link_status = BNX2_LINK_STATUS_2500HALF;
478 fw_link_status = BNX2_LINK_STATUS_2500FULL;
482 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
485 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
487 bnx2_read_phy(bp, MII_BMSR, &bmsr);
488 bnx2_read_phy(bp, MII_BMSR, &bmsr);
490 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
491 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
492 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
494 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
498 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
500 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
504 bnx2_report_link(struct bnx2 *bp)
507 netif_carrier_on(bp->dev);
508 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
510 printk("%d Mbps ", bp->line_speed);
512 if (bp->duplex == DUPLEX_FULL)
513 printk("full duplex");
515 printk("half duplex");
518 if (bp->flow_ctrl & FLOW_CTRL_RX) {
519 printk(", receive ");
520 if (bp->flow_ctrl & FLOW_CTRL_TX)
521 printk("& transmit ");
524 printk(", transmit ");
526 printk("flow control ON");
531 netif_carrier_off(bp->dev);
532 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
535 bnx2_report_fw_link(bp);
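/* Resolve RX/TX pause from the local and remote advertisements per
 * the 802.3 pause resolution rules: symmetric pause on both sides
 * enables flow control in both directions, otherwise the
 * asymmetric-pause bits decide which single direction (if any) is
 * enabled.  1000Base-X advertisement bits are first translated to
 * their copper ADVERTISE_PAUSE_* equivalents.
 */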
539 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
541 u32 local_adv, remote_adv;
544 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
545 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
547 if (bp->duplex == DUPLEX_FULL) {
548 bp->flow_ctrl = bp->req_flow_ctrl;
553 if (bp->duplex != DUPLEX_FULL) {
557 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
558 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
561 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
562 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
563 bp->flow_ctrl |= FLOW_CTRL_TX;
564 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
565 bp->flow_ctrl |= FLOW_CTRL_RX;
569 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
570 bnx2_read_phy(bp, MII_LPA, &remote_adv);
572 if (bp->phy_flags & PHY_SERDES_FLAG) {
573 u32 new_local_adv = 0;
574 u32 new_remote_adv = 0;
576 if (local_adv & ADVERTISE_1000XPAUSE)
577 new_local_adv |= ADVERTISE_PAUSE_CAP;
578 if (local_adv & ADVERTISE_1000XPSE_ASYM)
579 new_local_adv |= ADVERTISE_PAUSE_ASYM;
580 if (remote_adv & ADVERTISE_1000XPAUSE)
581 new_remote_adv |= ADVERTISE_PAUSE_CAP;
582 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
583 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
585 local_adv = new_local_adv;
586 remote_adv = new_remote_adv;
589 /* See Table 28B-3 of 802.3ab-1999 spec. */
590 if (local_adv & ADVERTISE_PAUSE_CAP) {
591 if(local_adv & ADVERTISE_PAUSE_ASYM) {
592 if (remote_adv & ADVERTISE_PAUSE_CAP) {
593 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
595 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
596 bp->flow_ctrl = FLOW_CTRL_RX;
600 if (remote_adv & ADVERTISE_PAUSE_CAP) {
601 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
605 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
606 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
607 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
609 bp->flow_ctrl = FLOW_CTRL_TX;
615 bnx2_5708s_linkup(struct bnx2 *bp)
620 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
621 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
622 case BCM5708S_1000X_STAT1_SPEED_10:
623 bp->line_speed = SPEED_10;
625 case BCM5708S_1000X_STAT1_SPEED_100:
626 bp->line_speed = SPEED_100;
628 case BCM5708S_1000X_STAT1_SPEED_1G:
629 bp->line_speed = SPEED_1000;
631 case BCM5708S_1000X_STAT1_SPEED_2G5:
632 bp->line_speed = SPEED_2500;
635 if (val & BCM5708S_1000X_STAT1_FD)
636 bp->duplex = DUPLEX_FULL;
638 bp->duplex = DUPLEX_HALF;
644 bnx2_5706s_linkup(struct bnx2 *bp)
646 u32 bmcr, local_adv, remote_adv, common;
649 bp->line_speed = SPEED_1000;
651 bnx2_read_phy(bp, MII_BMCR, &bmcr);
652 if (bmcr & BMCR_FULLDPLX) {
653 bp->duplex = DUPLEX_FULL;
656 bp->duplex = DUPLEX_HALF;
659 if (!(bmcr & BMCR_ANENABLE)) {
663 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
664 bnx2_read_phy(bp, MII_LPA, &remote_adv);
666 common = local_adv & remote_adv;
667 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
669 if (common & ADVERTISE_1000XFULL) {
670 bp->duplex = DUPLEX_FULL;
673 bp->duplex = DUPLEX_HALF;
681 bnx2_copper_linkup(struct bnx2 *bp)
685 bnx2_read_phy(bp, MII_BMCR, &bmcr);
686 if (bmcr & BMCR_ANENABLE) {
687 u32 local_adv, remote_adv, common;
689 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
690 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
692 common = local_adv & (remote_adv >> 2);
693 if (common & ADVERTISE_1000FULL) {
694 bp->line_speed = SPEED_1000;
695 bp->duplex = DUPLEX_FULL;
697 else if (common & ADVERTISE_1000HALF) {
698 bp->line_speed = SPEED_1000;
699 bp->duplex = DUPLEX_HALF;
702 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
703 bnx2_read_phy(bp, MII_LPA, &remote_adv);
705 common = local_adv & remote_adv;
706 if (common & ADVERTISE_100FULL) {
707 bp->line_speed = SPEED_100;
708 bp->duplex = DUPLEX_FULL;
710 else if (common & ADVERTISE_100HALF) {
711 bp->line_speed = SPEED_100;
712 bp->duplex = DUPLEX_HALF;
714 else if (common & ADVERTISE_10FULL) {
715 bp->line_speed = SPEED_10;
716 bp->duplex = DUPLEX_FULL;
718 else if (common & ADVERTISE_10HALF) {
719 bp->line_speed = SPEED_10;
720 bp->duplex = DUPLEX_HALF;
729 if (bmcr & BMCR_SPEED100) {
730 bp->line_speed = SPEED_100;
733 bp->line_speed = SPEED_10;
735 if (bmcr & BMCR_FULLDPLX) {
736 bp->duplex = DUPLEX_FULL;
739 bp->duplex = DUPLEX_HALF;
747 bnx2_set_mac_link(struct bnx2 *bp)
751 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
752 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
753 (bp->duplex == DUPLEX_HALF)) {
754 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
757 /* Configure the EMAC mode register. */
758 val = REG_RD(bp, BNX2_EMAC_MODE);
760 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
761 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
765 switch (bp->line_speed) {
767 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
768 val |= BNX2_EMAC_MODE_PORT_MII_10;
773 val |= BNX2_EMAC_MODE_PORT_MII;
776 val |= BNX2_EMAC_MODE_25G;
779 val |= BNX2_EMAC_MODE_PORT_GMII;
784 val |= BNX2_EMAC_MODE_PORT_GMII;
787 /* Set the MAC to operate in the appropriate duplex mode. */
788 if (bp->duplex == DUPLEX_HALF)
789 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
790 REG_WR(bp, BNX2_EMAC_MODE, val);
792 /* Enable/disable rx PAUSE. */
793 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
795 if (bp->flow_ctrl & FLOW_CTRL_RX)
796 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
797 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
799 /* Enable/disable tx PAUSE. */
800 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
801 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
803 if (bp->flow_ctrl & FLOW_CTRL_TX)
804 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
805 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
807 /* Acknowledge the interrupt. */
808 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
814 bnx2_set_link(struct bnx2 *bp)
819 if (bp->loopback == MAC_LOOPBACK) {
824 link_up = bp->link_up;
826 bnx2_read_phy(bp, MII_BMSR, &bmsr);
827 bnx2_read_phy(bp, MII_BMSR, &bmsr);
829 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
830 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
833 val = REG_RD(bp, BNX2_EMAC_STATUS);
834 if (val & BNX2_EMAC_STATUS_LINK)
835 bmsr |= BMSR_LSTATUS;
837 bmsr &= ~BMSR_LSTATUS;
840 if (bmsr & BMSR_LSTATUS) {
843 if (bp->phy_flags & PHY_SERDES_FLAG) {
844 if (CHIP_NUM(bp) == CHIP_NUM_5706)
845 bnx2_5706s_linkup(bp);
846 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
847 bnx2_5708s_linkup(bp);
850 bnx2_copper_linkup(bp);
852 bnx2_resolve_flow_ctrl(bp);
855 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
856 (bp->autoneg & AUTONEG_SPEED)) {
860 bnx2_read_phy(bp, MII_BMCR, &bmcr);
861 if (!(bmcr & BMCR_ANENABLE)) {
862 bnx2_write_phy(bp, MII_BMCR, bmcr |
866 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
870 if (bp->link_up != link_up) {
871 bnx2_report_link(bp);
874 bnx2_set_mac_link(bp);
880 bnx2_reset_phy(struct bnx2 *bp)
885 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
887 #define PHY_RESET_MAX_WAIT 100
888 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
891 bnx2_read_phy(bp, MII_BMCR, &reg);
892 if (!(reg & BMCR_RESET)) {
897 if (i == PHY_RESET_MAX_WAIT) {
904 bnx2_phy_get_pause_adv(struct bnx2 *bp)
908 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
909 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
911 if (bp->phy_flags & PHY_SERDES_FLAG) {
912 adv = ADVERTISE_1000XPAUSE;
915 adv = ADVERTISE_PAUSE_CAP;
918 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
919 if (bp->phy_flags & PHY_SERDES_FLAG) {
920 adv = ADVERTISE_1000XPSE_ASYM;
923 adv = ADVERTISE_PAUSE_ASYM;
926 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
927 if (bp->phy_flags & PHY_SERDES_FLAG) {
928 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
931 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
938 bnx2_setup_serdes_phy(struct bnx2 *bp)
943 if (!(bp->autoneg & AUTONEG_SPEED)) {
945 int force_link_down = 0;
947 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
948 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
949 if (up1 & BCM5708S_UP1_2G5) {
950 up1 &= ~BCM5708S_UP1_2G5;
951 bnx2_write_phy(bp, BCM5708S_UP1, up1);
956 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
957 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
959 bnx2_read_phy(bp, MII_BMCR, &bmcr);
960 new_bmcr = bmcr & ~BMCR_ANENABLE;
961 new_bmcr |= BMCR_SPEED1000;
962 if (bp->req_duplex == DUPLEX_FULL) {
963 adv |= ADVERTISE_1000XFULL;
964 new_bmcr |= BMCR_FULLDPLX;
967 adv |= ADVERTISE_1000XHALF;
968 new_bmcr &= ~BMCR_FULLDPLX;
970 if ((new_bmcr != bmcr) || (force_link_down)) {
971 /* Force a link down that is visible on the other side */
973 bnx2_write_phy(bp, MII_ADVERTISE, adv &
974 ~(ADVERTISE_1000XFULL |
975 ADVERTISE_1000XHALF));
976 bnx2_write_phy(bp, MII_BMCR, bmcr |
977 BMCR_ANRESTART | BMCR_ANENABLE);
980 netif_carrier_off(bp->dev);
981 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
983 bnx2_write_phy(bp, MII_ADVERTISE, adv);
984 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
989 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
990 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
991 up1 |= BCM5708S_UP1_2G5;
992 bnx2_write_phy(bp, BCM5708S_UP1, up1);
995 if (bp->advertising & ADVERTISED_1000baseT_Full)
996 new_adv |= ADVERTISE_1000XFULL;
998 new_adv |= bnx2_phy_get_pause_adv(bp);
1000 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1001 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1003 bp->serdes_an_pending = 0;
1004 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1005 /* Force a link down that is visible on the other side */
1009 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1010 for (i = 0; i < 110; i++) {
1015 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1016 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1018 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1019 /* Speed up link-up time when the link partner
1020 * does not autonegotiate, which is very common
1021 * in blade servers. Some blade servers use
1022 * IPMI for keyboard input, and it's important
1023 * to minimize link disruptions. Autonegotiation involves
1024 * exchanging base pages plus 3 next pages and
1025 * normally completes in about 120 msec.
1027 bp->current_interval = SERDES_AN_TIMEOUT;
1028 bp->serdes_an_pending = 1;
1029 mod_timer(&bp->timer, jiffies + bp->current_interval);
1036 #define ETHTOOL_ALL_FIBRE_SPEED \
1037 (ADVERTISED_1000baseT_Full)
1039 #define ETHTOOL_ALL_COPPER_SPEED \
1040 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1041 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1042 ADVERTISED_1000baseT_Full)
1044 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1045 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1047 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1050 bnx2_setup_copper_phy(struct bnx2 *bp)
1055 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1057 if (bp->autoneg & AUTONEG_SPEED) {
1058 u32 adv_reg, adv1000_reg;
1059 u32 new_adv_reg = 0;
1060 u32 new_adv1000_reg = 0;
1062 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1063 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1064 ADVERTISE_PAUSE_ASYM);
1066 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1067 adv1000_reg &= PHY_ALL_1000_SPEED;
1069 if (bp->advertising & ADVERTISED_10baseT_Half)
1070 new_adv_reg |= ADVERTISE_10HALF;
1071 if (bp->advertising & ADVERTISED_10baseT_Full)
1072 new_adv_reg |= ADVERTISE_10FULL;
1073 if (bp->advertising & ADVERTISED_100baseT_Half)
1074 new_adv_reg |= ADVERTISE_100HALF;
1075 if (bp->advertising & ADVERTISED_100baseT_Full)
1076 new_adv_reg |= ADVERTISE_100FULL;
1077 if (bp->advertising & ADVERTISED_1000baseT_Full)
1078 new_adv1000_reg |= ADVERTISE_1000FULL;
1080 new_adv_reg |= ADVERTISE_CSMA;
1082 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1084 if ((adv1000_reg != new_adv1000_reg) ||
1085 (adv_reg != new_adv_reg) ||
1086 ((bmcr & BMCR_ANENABLE) == 0)) {
1088 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1089 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1090 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1093 else if (bp->link_up) {
1094 /* Flow ctrl may have changed from auto to forced */
1095 /* or vice-versa. */
1097 bnx2_resolve_flow_ctrl(bp);
1098 bnx2_set_mac_link(bp);
1104 if (bp->req_line_speed == SPEED_100) {
1105 new_bmcr |= BMCR_SPEED100;
1107 if (bp->req_duplex == DUPLEX_FULL) {
1108 new_bmcr |= BMCR_FULLDPLX;
1110 if (new_bmcr != bmcr) {
1114 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1115 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1117 if (bmsr & BMSR_LSTATUS) {
1118 /* Force link down */
1119 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1122 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1123 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1125 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1128 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1130 /* Normally, the new speed is set up after the link has
1131 * gone down and up again. In some cases, the link will not go
1132 * down, so we need to set up the new speed here.
1134 if (bmsr & BMSR_LSTATUS) {
1135 bp->line_speed = bp->req_line_speed;
1136 bp->duplex = bp->req_duplex;
1137 bnx2_resolve_flow_ctrl(bp);
1138 bnx2_set_mac_link(bp);
1145 bnx2_setup_phy(struct bnx2 *bp)
1147 if (bp->loopback == MAC_LOOPBACK)
1150 if (bp->phy_flags & PHY_SERDES_FLAG) {
1151 return (bnx2_setup_serdes_phy(bp));
1154 return (bnx2_setup_copper_phy(bp));
1159 bnx2_init_5708s_phy(struct bnx2 *bp)
1163 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1164 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1165 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1167 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1168 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1169 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1171 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1172 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1173 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1175 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1176 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1177 val |= BCM5708S_UP1_2G5;
1178 bnx2_write_phy(bp, BCM5708S_UP1, val);
1181 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1182 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1183 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1184 /* increase tx signal amplitude */
1185 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1186 BCM5708S_BLK_ADDR_TX_MISC);
1187 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1188 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1189 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1190 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1193 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1194 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1199 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1200 BNX2_SHARED_HW_CFG_CONFIG);
1201 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1202 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1203 BCM5708S_BLK_ADDR_TX_MISC);
1204 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1205 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1206 BCM5708S_BLK_ADDR_DIG);
1213 bnx2_init_5706s_phy(struct bnx2 *bp)
1215 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1217 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1218 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1221 if (bp->dev->mtu > 1500) {
1224 /* Set extended packet length bit */
1225 bnx2_write_phy(bp, 0x18, 0x7);
1226 bnx2_read_phy(bp, 0x18, &val);
1227 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1229 bnx2_write_phy(bp, 0x1c, 0x6c00);
1230 bnx2_read_phy(bp, 0x1c, &val);
1231 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1236 bnx2_write_phy(bp, 0x18, 0x7);
1237 bnx2_read_phy(bp, 0x18, &val);
1238 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1240 bnx2_write_phy(bp, 0x1c, 0x6c00);
1241 bnx2_read_phy(bp, 0x1c, &val);
1242 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1249 bnx2_init_copper_phy(struct bnx2 *bp)
1253 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1255 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1256 bnx2_write_phy(bp, 0x18, 0x0c00);
1257 bnx2_write_phy(bp, 0x17, 0x000a);
1258 bnx2_write_phy(bp, 0x15, 0x310b);
1259 bnx2_write_phy(bp, 0x17, 0x201f);
1260 bnx2_write_phy(bp, 0x15, 0x9506);
1261 bnx2_write_phy(bp, 0x17, 0x401f);
1262 bnx2_write_phy(bp, 0x15, 0x14e2);
1263 bnx2_write_phy(bp, 0x18, 0x0400);
1266 if (bp->dev->mtu > 1500) {
1267 /* Set extended packet length bit */
1268 bnx2_write_phy(bp, 0x18, 0x7);
1269 bnx2_read_phy(bp, 0x18, &val);
1270 bnx2_write_phy(bp, 0x18, val | 0x4000);
1272 bnx2_read_phy(bp, 0x10, &val);
1273 bnx2_write_phy(bp, 0x10, val | 0x1);
1276 bnx2_write_phy(bp, 0x18, 0x7);
1277 bnx2_read_phy(bp, 0x18, &val);
1278 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1280 bnx2_read_phy(bp, 0x10, &val);
1281 bnx2_write_phy(bp, 0x10, val & ~0x1);
1284 /* ethernet@wirespeed */
1285 bnx2_write_phy(bp, 0x18, 0x7007);
1286 bnx2_read_phy(bp, 0x18, &val);
1287 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1293 bnx2_init_phy(struct bnx2 *bp)
1298 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1299 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1301 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1305 bnx2_read_phy(bp, MII_PHYSID1, &val);
1306 bp->phy_id = val << 16;
1307 bnx2_read_phy(bp, MII_PHYSID2, &val);
1308 bp->phy_id |= val & 0xffff;
1310 if (bp->phy_flags & PHY_SERDES_FLAG) {
1311 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1312 rc = bnx2_init_5706s_phy(bp);
1313 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1314 rc = bnx2_init_5708s_phy(bp);
1317 rc = bnx2_init_copper_phy(bp);
1326 bnx2_set_mac_loopback(struct bnx2 *bp)
1330 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1331 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1332 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1333 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1338 static int bnx2_test_link(struct bnx2 *);
1341 bnx2_set_phy_loopback(struct bnx2 *bp)
1346 spin_lock_bh(&bp->phy_lock);
1347 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1349 spin_unlock_bh(&bp->phy_lock);
1353 for (i = 0; i < 10; i++) {
1354 if (bnx2_test_link(bp) == 0)
1359 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1360 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1361 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1362 BNX2_EMAC_MODE_25G);
1364 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1365 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
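/* Driver <-> bootcode mailbox handshake: the message, tagged with the
 * current write sequence number, is posted to the DRV_MB word in
 * shared memory and FW_MB is then polled for a matching ack.  On
 * timeout a FW_TIMEOUT code is written back so the bootcode knows the
 * event was missed.
 */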
1371 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1377 msg_data |= bp->fw_wr_seq;
1379 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1381 /* wait for an acknowledgement. */
1382 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1385 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1387 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1390 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1393 /* If we timed out, inform the firmware that this is the case. */
1394 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1396 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1399 msg_data &= ~BNX2_DRV_MSG_CODE;
1400 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1402 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1407 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1414 bnx2_init_context(struct bnx2 *bp)
1420 u32 vcid_addr, pcid_addr, offset;
1424 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1427 vcid_addr = GET_PCID_ADDR(vcid);
1429 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1434 pcid_addr = GET_PCID_ADDR(new_vcid);
1437 vcid_addr = GET_CID_ADDR(vcid);
1438 pcid_addr = vcid_addr;
1441 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1442 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1444 /* Zero out the context. */
1445 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1446 CTX_WR(bp, 0x00, offset, 0);
1449 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1450 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1455 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1461 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1462 if (good_mbuf == NULL) {
1463 printk(KERN_ERR PFX "Failed to allocate memory in "
1464 "bnx2_alloc_bad_rbuf\n");
1468 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1469 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1473 /* Allocate a bunch of mbufs and save the good ones in an array. */
1474 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1475 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1476 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1478 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1480 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1482 /* The addresses with Bit 9 set are bad memory blocks. */
1483 if (!(val & (1 << 9))) {
1484 good_mbuf[good_mbuf_cnt] = (u16) val;
1488 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1491 /* Free the good ones back to the mbuf pool, thus discarding
1492 * all the bad ones. */
1493 while (good_mbuf_cnt) {
1496 val = good_mbuf[good_mbuf_cnt];
1497 val = (val << 9) | val | 1;
1499 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1506 bnx2_set_mac_addr(struct bnx2 *bp)
1509 u8 *mac_addr = bp->dev->dev_addr;
1511 val = (mac_addr[0] << 8) | mac_addr[1];
1513 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1515 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1516 (mac_addr[4] << 8) | mac_addr[5];
1518 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
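/* Post a new receive skb in ring slot 'index': align the data area to
 * 8 bytes, map it for DMA, record the mapping in the software ring
 * and the bus address in the hardware BD, and advance rx_prod_bseq,
 * the running byte count later written to the host BSEQ doorbell.
 */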
1522 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1524 struct sk_buff *skb;
1525 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1527 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1528 unsigned long align;
1530 skb = dev_alloc_skb(bp->rx_buf_size);
1535 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1536 skb_reserve(skb, 8 - align);
1540 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1541 PCI_DMA_FROMDEVICE);
1544 pci_unmap_addr_set(rx_buf, mapping, mapping);
1546 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1547 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1549 bp->rx_prod_bseq += bp->rx_buf_use_size;
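/* Link-change attention handling: the raw state is latched in
 * status_attn_bits and the driver mirrors it into
 * status_attn_bits_ack through the STATUS_BIT_SET/CLEAR command
 * registers, so the two fields differ only while a link change is
 * still unacknowledged.
 */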
1555 bnx2_phy_int(struct bnx2 *bp)
1557 u32 new_link_state, old_link_state;
1559 new_link_state = bp->status_blk->status_attn_bits &
1560 STATUS_ATTN_BITS_LINK_STATE;
1561 old_link_state = bp->status_blk->status_attn_bits_ack &
1562 STATUS_ATTN_BITS_LINK_STATE;
1563 if (new_link_state != old_link_state) {
1564 if (new_link_state) {
1565 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1566 STATUS_ATTN_BITS_LINK_STATE);
1569 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1570 STATUS_ATTN_BITS_LINK_STATE);
1577 bnx2_tx_int(struct bnx2 *bp)
1579 struct status_block *sblk = bp->status_blk;
1580 u16 hw_cons, sw_cons, sw_ring_cons;
1583 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1584 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1587 sw_cons = bp->tx_cons;
1589 while (sw_cons != hw_cons) {
1590 struct sw_bd *tx_buf;
1591 struct sk_buff *skb;
1594 sw_ring_cons = TX_RING_IDX(sw_cons);
1596 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1599 /* partial BD completions possible with TSO packets */
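/* The last BD of a TSO packet may not be completed yet: its index is
 * compared with hw_cons using signed 16-bit arithmetic (which
 * tolerates the ring index wrapping) and, if it is still ahead,
 * processing of this packet is presumably deferred to a later pass.
 */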
1600 if (skb_shinfo(skb)->tso_size) {
1601 u16 last_idx, last_ring_idx;
1603 last_idx = sw_cons +
1604 skb_shinfo(skb)->nr_frags + 1;
1605 last_ring_idx = sw_ring_cons +
1606 skb_shinfo(skb)->nr_frags + 1;
1607 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1610 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1615 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1616 skb_headlen(skb), PCI_DMA_TODEVICE);
1619 last = skb_shinfo(skb)->nr_frags;
1621 for (i = 0; i < last; i++) {
1622 sw_cons = NEXT_TX_BD(sw_cons);
1624 pci_unmap_page(bp->pdev,
1626 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1628 skb_shinfo(skb)->frags[i].size,
1632 sw_cons = NEXT_TX_BD(sw_cons);
1634 tx_free_bd += last + 1;
1636 dev_kfree_skb_irq(skb);
1638 hw_cons = bp->hw_tx_cons =
1639 sblk->status_tx_quick_consumer_index0;
1641 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1646 bp->tx_cons = sw_cons;
1648 if (unlikely(netif_queue_stopped(bp->dev))) {
1649 spin_lock(&bp->tx_lock);
1650 if ((netif_queue_stopped(bp->dev)) &&
1651 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1653 netif_wake_queue(bp->dev);
1655 spin_unlock(&bp->tx_lock);
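/* When a received buffer cannot be replaced (small-packet copy or
 * allocation failure), recycle it: move the skb and its existing DMA
 * mapping from the consumer slot to the producer slot and copy the
 * buffer address into the producer BD, so the ring never loses a
 * buffer.
 */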
1660 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1663 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1664 struct rx_bd *cons_bd, *prod_bd;
1666 cons_rx_buf = &bp->rx_buf_ring[cons];
1667 prod_rx_buf = &bp->rx_buf_ring[prod];
1669 pci_dma_sync_single_for_device(bp->pdev,
1670 pci_unmap_addr(cons_rx_buf, mapping),
1671 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1673 bp->rx_prod_bseq += bp->rx_buf_use_size;
1675 prod_rx_buf->skb = skb;
1680 pci_unmap_addr_set(prod_rx_buf, mapping,
1681 pci_unmap_addr(cons_rx_buf, mapping));
1683 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1684 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1685 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1686 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1690 bnx2_rx_int(struct bnx2 *bp, int budget)
1692 struct status_block *sblk = bp->status_blk;
1693 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1694 struct l2_fhdr *rx_hdr;
1697 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1698 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1701 sw_cons = bp->rx_cons;
1702 sw_prod = bp->rx_prod;
1704 /* Memory barrier necessary as speculative reads of the rx
1705 * buffer can be ahead of the index in the status block
1708 while (sw_cons != hw_cons) {
1711 struct sw_bd *rx_buf;
1712 struct sk_buff *skb;
1713 dma_addr_t dma_addr;
1715 sw_ring_cons = RX_RING_IDX(sw_cons);
1716 sw_ring_prod = RX_RING_IDX(sw_prod);
1718 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1723 dma_addr = pci_unmap_addr(rx_buf, mapping);
1725 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1726 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1728 rx_hdr = (struct l2_fhdr *) skb->data;
1729 len = rx_hdr->l2_fhdr_pkt_len - 4;
1731 if ((status = rx_hdr->l2_fhdr_status) &
1732 (L2_FHDR_ERRORS_BAD_CRC |
1733 L2_FHDR_ERRORS_PHY_DECODE |
1734 L2_FHDR_ERRORS_ALIGNMENT |
1735 L2_FHDR_ERRORS_TOO_SHORT |
1736 L2_FHDR_ERRORS_GIANT_FRAME)) {
1741 /* Since we don't have a jumbo ring, copy small packets
1744 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1745 struct sk_buff *new_skb;
1747 new_skb = dev_alloc_skb(len + 2);
1748 if (new_skb == NULL)
1752 memcpy(new_skb->data,
1753 skb->data + bp->rx_offset - 2,
1756 skb_reserve(new_skb, 2);
1757 skb_put(new_skb, len);
1758 new_skb->dev = bp->dev;
1760 bnx2_reuse_rx_skb(bp, skb,
1761 sw_ring_cons, sw_ring_prod);
1765 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1766 pci_unmap_single(bp->pdev, dma_addr,
1767 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1769 skb_reserve(skb, bp->rx_offset);
1774 bnx2_reuse_rx_skb(bp, skb,
1775 sw_ring_cons, sw_ring_prod);
1779 skb->protocol = eth_type_trans(skb, bp->dev);
1781 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1782 (htons(skb->protocol) != 0x8100)) {
1784 dev_kfree_skb_irq(skb);
1789 skb->ip_summed = CHECKSUM_NONE;
1791 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1792 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1794 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1795 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1796 skb->ip_summed = CHECKSUM_UNNECESSARY;
1800 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1801 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1802 rx_hdr->l2_fhdr_vlan_tag);
1806 netif_receive_skb(skb);
1808 bp->dev->last_rx = jiffies;
1812 sw_cons = NEXT_RX_BD(sw_cons);
1813 sw_prod = NEXT_RX_BD(sw_prod);
1815 if (rx_pkt == budget)
1818 /* Refresh hw_cons to see if there is new work */
1819 if (sw_cons == hw_cons) {
1820 hw_cons = bp->hw_rx_cons =
1821 sblk->status_rx_quick_consumer_index0;
1822 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1827 bp->rx_cons = sw_cons;
1828 bp->rx_prod = sw_prod;
1830 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1832 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1840 /* MSI ISR - The only difference between this and the INTx ISR
1841 * is that the MSI interrupt is always serviced.
1844 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1846 struct net_device *dev = dev_instance;
1847 struct bnx2 *bp = netdev_priv(dev);
1849 prefetch(bp->status_blk);
1850 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1851 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1852 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1854 /* Return here if interrupt is disabled. */
1855 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1858 netif_rx_schedule(dev);
1864 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1866 struct net_device *dev = dev_instance;
1867 struct bnx2 *bp = netdev_priv(dev);
1869 /* When using INTx, it is possible for the interrupt to arrive
1870 * at the CPU before the status block that was posted prior to
1871 * the interrupt. Reading a register will flush the status block.
1872 * When using MSI, the MSI message will always complete after
1873 * the status block write.
1875 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1876 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1877 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1880 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1881 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1882 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1884 /* Return here if interrupt is shared and is disabled. */
1885 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1888 netif_rx_schedule(dev);
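/* Work is pending when the hardware consumer indices in the status
 * block have moved past the driver's cached copies, or when the link
 * attention bit no longer matches its acknowledged state.
 */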
1894 bnx2_has_work(struct bnx2 *bp)
1896 struct status_block *sblk = bp->status_blk;
1898 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1899 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1902 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1910 bnx2_poll(struct net_device *dev, int *budget)
1912 struct bnx2 *bp = netdev_priv(dev);
1914 if ((bp->status_blk->status_attn_bits &
1915 STATUS_ATTN_BITS_LINK_STATE) !=
1916 (bp->status_blk->status_attn_bits_ack &
1917 STATUS_ATTN_BITS_LINK_STATE)) {
1919 spin_lock(&bp->phy_lock);
1921 spin_unlock(&bp->phy_lock);
1923 /* This is needed to take care of transient status
1924 * during link changes.
1926 REG_WR(bp, BNX2_HC_COMMAND,
1927 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1928 REG_RD(bp, BNX2_HC_COMMAND);
1931 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1934 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1935 int orig_budget = *budget;
1938 if (orig_budget > dev->quota)
1939 orig_budget = dev->quota;
1941 work_done = bnx2_rx_int(bp, orig_budget);
1942 *budget -= work_done;
1943 dev->quota -= work_done;
1946 bp->last_status_idx = bp->status_blk->status_idx;
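/* All work done: leave polling mode and re-arm the interrupt by
 * acking up to last_status_idx.  With INTx the ack is issued in two
 * writes, first with the mask bit still set and then without,
 * mirroring bnx2_enable_int().
 */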
1949 if (!bnx2_has_work(bp)) {
1950 netif_rx_complete(dev);
1951 if (likely(bp->flags & USING_MSI_FLAG)) {
1952 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1953 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1954 bp->last_status_idx);
1957 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1958 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1959 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
1960 bp->last_status_idx);
1962 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1963 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1964 bp->last_status_idx);
1971 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1972 * from set_multicast.
1975 bnx2_set_rx_mode(struct net_device *dev)
1977 struct bnx2 *bp = netdev_priv(dev);
1978 u32 rx_mode, sort_mode;
1981 spin_lock_bh(&bp->phy_lock);
1983 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1984 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
1985 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
1987 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
1988 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1990 if (!(bp->flags & ASF_ENABLE_FLAG))
1991 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1993 if (dev->flags & IFF_PROMISC) {
1994 /* Promiscuous mode. */
1995 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
1996 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
1998 else if (dev->flags & IFF_ALLMULTI) {
1999 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2000 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2003 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2006 /* Accept one or more multicast(s). */
2007 struct dev_mc_list *mclist;
2008 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2013 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
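/* Each multicast address is hashed into one of 256 filter bits: the
 * top three bits of the 8-bit hash (derived from ether_crc_le() of
 * the address) select one of the NUM_MC_HASH_REGISTERS 32-bit hash
 * registers and the low five bits select the bit within it.
 */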
2015 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2016 i++, mclist = mclist->next) {
2018 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2020 regidx = (bit & 0xe0) >> 5;
2022 mc_filter[regidx] |= (1 << bit);
2025 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2026 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2030 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2033 if (rx_mode != bp->rx_mode) {
2034 bp->rx_mode = rx_mode;
2035 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2038 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2039 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2040 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2042 spin_unlock_bh(&bp->phy_lock);
2046 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2053 for (i = 0; i < rv2p_code_len; i += 8) {
2054 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2056 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2059 if (rv2p_proc == RV2P_PROC1) {
2060 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2061 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2064 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2065 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2069 /* Reset the processor; the un-stall is done later. */
2070 if (rv2p_proc == RV2P_PROC1) {
2071 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2074 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
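/* Generic on-chip RISC firmware load: halt the CPU, copy the text,
 * data, sbss, bss and read-only sections into its scratchpad
 * (translating MIPS-view addresses to scratchpad offsets), clear the
 * prefetched instruction, point the program counter at the start
 * address and un-halt the CPU.
 */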
2079 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2085 val = REG_RD_IND(bp, cpu_reg->mode);
2086 val |= cpu_reg->mode_value_halt;
2087 REG_WR_IND(bp, cpu_reg->mode, val);
2088 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2090 /* Load the Text area. */
2091 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2095 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2096 REG_WR_IND(bp, offset, fw->text[j]);
2100 /* Load the Data area. */
2101 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2105 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2106 REG_WR_IND(bp, offset, fw->data[j]);
2110 /* Load the SBSS area. */
2111 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2115 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2116 REG_WR_IND(bp, offset, fw->sbss[j]);
2120 /* Load the BSS area. */
2121 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2125 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2126 REG_WR_IND(bp, offset, fw->bss[j]);
2130 /* Load the Read-Only area. */
2131 offset = cpu_reg->spad_base +
2132 (fw->rodata_addr - cpu_reg->mips_view_base);
2136 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2137 REG_WR_IND(bp, offset, fw->rodata[j]);
2141 /* Clear the pre-fetch instruction. */
2142 REG_WR_IND(bp, cpu_reg->inst, 0);
2143 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2145 /* Start the CPU. */
2146 val = REG_RD_IND(bp, cpu_reg->mode);
2147 val &= ~cpu_reg->mode_value_halt;
2148 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2149 REG_WR_IND(bp, cpu_reg->mode, val);
2153 bnx2_init_cpus(struct bnx2 *bp)
2155 struct cpu_reg cpu_reg;
2158 /* Initialize the RV2P processor. */
2159 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2160 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2162 /* Initialize the RX Processor. */
2163 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2164 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2165 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2166 cpu_reg.state = BNX2_RXP_CPU_STATE;
2167 cpu_reg.state_value_clear = 0xffffff;
2168 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2169 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2170 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2171 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2172 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2173 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2174 cpu_reg.mips_view_base = 0x8000000;
2176 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2177 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2178 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2179 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2181 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2182 fw.text_len = bnx2_RXP_b06FwTextLen;
2184 fw.text = bnx2_RXP_b06FwText;
2186 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2187 fw.data_len = bnx2_RXP_b06FwDataLen;
2189 fw.data = bnx2_RXP_b06FwData;
2191 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2192 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2194 fw.sbss = bnx2_RXP_b06FwSbss;
2196 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2197 fw.bss_len = bnx2_RXP_b06FwBssLen;
2199 fw.bss = bnx2_RXP_b06FwBss;
2201 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2202 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2203 fw.rodata_index = 0;
2204 fw.rodata = bnx2_RXP_b06FwRodata;
2206 load_cpu_fw(bp, &cpu_reg, &fw);
2208 /* Initialize the TX Processor. */
2209 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2210 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2211 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2212 cpu_reg.state = BNX2_TXP_CPU_STATE;
2213 cpu_reg.state_value_clear = 0xffffff;
2214 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2215 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2216 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2217 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2218 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2219 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2220 cpu_reg.mips_view_base = 0x8000000;
2222 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2223 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2224 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2225 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2227 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2228 fw.text_len = bnx2_TXP_b06FwTextLen;
2230 fw.text = bnx2_TXP_b06FwText;
2232 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2233 fw.data_len = bnx2_TXP_b06FwDataLen;
2235 fw.data = bnx2_TXP_b06FwData;
2237 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2238 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2240 fw.sbss = bnx2_TXP_b06FwSbss;
2242 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2243 fw.bss_len = bnx2_TXP_b06FwBssLen;
2245 fw.bss = bnx2_TXP_b06FwBss;
2247 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2248 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2249 fw.rodata_index = 0;
2250 fw.rodata = bnx2_TXP_b06FwRodata;
2252 load_cpu_fw(bp, &cpu_reg, &fw);
2254 /* Initialize the TX Patch-up Processor. */
2255 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2256 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2257 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2258 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2259 cpu_reg.state_value_clear = 0xffffff;
2260 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2261 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2262 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2263 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2264 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2265 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2266 cpu_reg.mips_view_base = 0x8000000;
2268 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2269 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2270 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2271 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2273 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2274 fw.text_len = bnx2_TPAT_b06FwTextLen;
2276 fw.text = bnx2_TPAT_b06FwText;
2278 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2279 fw.data_len = bnx2_TPAT_b06FwDataLen;
2281 fw.data = bnx2_TPAT_b06FwData;
2283 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2284 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2286 fw.sbss = bnx2_TPAT_b06FwSbss;
2288 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2289 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2291 fw.bss = bnx2_TPAT_b06FwBss;
2293 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2294 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2295 fw.rodata_index = 0;
2296 fw.rodata = bnx2_TPAT_b06FwRodata;
2298 load_cpu_fw(bp, &cpu_reg, &fw);
2300 /* Initialize the Completion Processor. */
2301 cpu_reg.mode = BNX2_COM_CPU_MODE;
2302 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2303 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2304 cpu_reg.state = BNX2_COM_CPU_STATE;
2305 cpu_reg.state_value_clear = 0xffffff;
2306 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2307 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2308 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2309 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2310 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2311 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2312 cpu_reg.mips_view_base = 0x8000000;
2314 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2315 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2316 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2317 fw.start_addr = bnx2_COM_b06FwStartAddr;
2319 fw.text_addr = bnx2_COM_b06FwTextAddr;
2320 fw.text_len = bnx2_COM_b06FwTextLen;
2322 fw.text = bnx2_COM_b06FwText;
2324 fw.data_addr = bnx2_COM_b06FwDataAddr;
2325 fw.data_len = bnx2_COM_b06FwDataLen;
2327 fw.data = bnx2_COM_b06FwData;
2329 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2330 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2332 fw.sbss = bnx2_COM_b06FwSbss;
2334 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2335 fw.bss_len = bnx2_COM_b06FwBssLen;
2337 fw.bss = bnx2_COM_b06FwBss;
2339 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2340 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2341 fw.rodata_index = 0;
2342 fw.rodata = bnx2_COM_b06FwRodata;
2344 load_cpu_fw(bp, &cpu_reg, &fw);
2349 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2353 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2359 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2360 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2361 PCI_PM_CTRL_PME_STATUS);
2363 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2364 /* delay required during transition out of D3hot */
2367 val = REG_RD(bp, BNX2_EMAC_MODE);
2368 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2369 val &= ~BNX2_EMAC_MODE_MPKT;
2370 REG_WR(bp, BNX2_EMAC_MODE, val);
2372 val = REG_RD(bp, BNX2_RPM_CONFIG);
2373 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2374 REG_WR(bp, BNX2_RPM_CONFIG, val);
2385 autoneg = bp->autoneg;
2386 advertising = bp->advertising;
2388 bp->autoneg = AUTONEG_SPEED;
2389 bp->advertising = ADVERTISED_10baseT_Half |
2390 ADVERTISED_10baseT_Full |
2391 ADVERTISED_100baseT_Half |
2392 ADVERTISED_100baseT_Full |
2395 bnx2_setup_copper_phy(bp);
2397 bp->autoneg = autoneg;
2398 bp->advertising = advertising;
2400 bnx2_set_mac_addr(bp);
2402 val = REG_RD(bp, BNX2_EMAC_MODE);
2404 /* Enable port mode. */
2405 val &= ~BNX2_EMAC_MODE_PORT;
2406 val |= BNX2_EMAC_MODE_PORT_MII |
2407 BNX2_EMAC_MODE_MPKT_RCVD |
2408 BNX2_EMAC_MODE_ACPI_RCVD |
2409 BNX2_EMAC_MODE_MPKT;
2411 REG_WR(bp, BNX2_EMAC_MODE, val);
2413 /* receive all multicast */
2414 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2415 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2418 REG_WR(bp, BNX2_EMAC_RX_MODE,
2419 BNX2_EMAC_RX_MODE_SORT_MODE);
2421 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2422 BNX2_RPM_SORT_USER0_MC_EN;
2423 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2424 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2425 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2426 BNX2_RPM_SORT_USER0_ENA);
2428 /* Need to enable EMAC and RPM for WOL. */
2429 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2430 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2431 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2432 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2434 val = REG_RD(bp, BNX2_RPM_CONFIG);
2435 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2436 REG_WR(bp, BNX2_RPM_CONFIG, val);
2438 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2441 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2444 if (!(bp->flags & NO_WOL_FLAG))
2445 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2447 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2448 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2449 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2458 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2460 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2463 /* No more memory access after this point until
2464 * device is brought back to D0.
2476 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2481 /* Request access to the flash interface. */
2482 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2483 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2484 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2485 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2491 if (j >= NVRAM_TIMEOUT_COUNT)
2498 bnx2_release_nvram_lock(struct bnx2 *bp)
2503 /* Relinquish nvram interface. */
2504 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2506 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2507 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2508 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2514 if (j >= NVRAM_TIMEOUT_COUNT)
2522 bnx2_enable_nvram_write(struct bnx2 *bp)
2526 val = REG_RD(bp, BNX2_MISC_CFG);
2527 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2529 if (!bp->flash_info->buffered) {
2532 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2533 REG_WR(bp, BNX2_NVM_COMMAND,
2534 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2536 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2539 val = REG_RD(bp, BNX2_NVM_COMMAND);
2540 if (val & BNX2_NVM_COMMAND_DONE)
2544 if (j >= NVRAM_TIMEOUT_COUNT)
2551 bnx2_disable_nvram_write(struct bnx2 *bp)
2555 val = REG_RD(bp, BNX2_MISC_CFG);
2556 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2561 bnx2_enable_nvram_access(struct bnx2 *bp)
2565 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2566 /* Enable both bits, even on read. */
2567 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2568 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2572 bnx2_disable_nvram_access(struct bnx2 *bp)
2576 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2577 /* Disable both bits, even after read. */
2578 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2579 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2580 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2584 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2589 if (bp->flash_info->buffered)
2590 /* Buffered flash, no erase needed */
2593 /* Build an erase command */
2594 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2595 BNX2_NVM_COMMAND_DOIT;
2597 /* Need to clear DONE bit separately. */
2598 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2600 /* Address of the NVRAM page to erase. */
2601 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2603 /* Issue an erase command. */
2604 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2606 /* Wait for completion. */
2607 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2612 val = REG_RD(bp, BNX2_NVM_COMMAND);
2613 if (val & BNX2_NVM_COMMAND_DONE)
2617 if (j >= NVRAM_TIMEOUT_COUNT)
2624 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2629 /* Build the command word. */
2630 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2632 /* Translate the offset for a buffered flash. */
2633 if (bp->flash_info->buffered) {
2634 offset = ((offset / bp->flash_info->page_size) <<
2635 bp->flash_info->page_bits) +
2636 (offset % bp->flash_info->page_size);
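/* Example: with a hypothetical 264-byte page addressed through 9
 * page-address bits, a linear offset of 600 falls in page 2 (600 / 264)
 * at byte 72 (600 % 264), so the value programmed into BNX2_NVM_ADDR
 * below is (2 << 9) + 72 = 0x448.
 */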
2639 /* Need to clear DONE bit separately. */
2640 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2642 /* Address of the NVRAM to read from. */
2643 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2645 /* Issue a read command. */
2646 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2648 /* Wait for completion. */
2649 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2654 val = REG_RD(bp, BNX2_NVM_COMMAND);
2655 if (val & BNX2_NVM_COMMAND_DONE) {
2656 val = REG_RD(bp, BNX2_NVM_READ);
2658 val = be32_to_cpu(val);
2659 memcpy(ret_val, &val, 4);
2663 if (j >= NVRAM_TIMEOUT_COUNT)
2671 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2676 /* Build the command word. */
2677 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2679 /* Translate the offset for a buffered flash. */
2680 if (bp->flash_info->buffered) {
2681 offset = ((offset / bp->flash_info->page_size) <<
2682 bp->flash_info->page_bits) +
2683 (offset % bp->flash_info->page_size);
2686 /* Need to clear DONE bit separately. */
2687 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2689 memcpy(&val32, val, 4);
2690 val32 = cpu_to_be32(val32);
2692 /* Write the data. */
2693 REG_WR(bp, BNX2_NVM_WRITE, val32);
2695 /* Address of the NVRAM to write to. */
2696 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2698 /* Issue the write command. */
2699 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2701 /* Wait for completion. */
2702 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2705 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2708 if (j >= NVRAM_TIMEOUT_COUNT)
2715 bnx2_init_nvram(struct bnx2 *bp)
2718 int j, entry_count, rc;
2719 struct flash_spec *flash;
2721 /* Determine the selected interface. */
2722 val = REG_RD(bp, BNX2_NVM_CFG1);
2724 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2727 if (val & 0x40000000) {
2729 /* Flash interface has been reconfigured */
2730 for (j = 0, flash = &flash_table[0]; j < entry_count;
2732 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2733 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2734 bp->flash_info = flash;
2741 /* Flash interface has not been reconfigured yet */
2743 if (val & (1 << 23))
2744 mask = FLASH_BACKUP_STRAP_MASK;
2746 mask = FLASH_STRAP_MASK;
2748 for (j = 0, flash = &flash_table[0]; j < entry_count;
2751 if ((val & mask) == (flash->strapping & mask)) {
2752 bp->flash_info = flash;
2754 /* Request access to the flash interface. */
2755 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2758 /* Enable access to flash interface */
2759 bnx2_enable_nvram_access(bp);
2761 /* Reconfigure the flash interface */
2762 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2763 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2764 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2765 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2767 /* Disable access to flash interface */
2768 bnx2_disable_nvram_access(bp);
2769 bnx2_release_nvram_lock(bp);
2774 } /* if (val & 0x40000000) */
2776 if (j == entry_count) {
2777 bp->flash_info = NULL;
2778 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2782 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2783 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2785 bp->flash_size = val;
2787 bp->flash_size = bp->flash_info->total_size;
2793 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2797 u32 cmd_flags, offset32, len32, extra;
2802 /* Request access to the flash interface. */
2803 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2806 /* Enable access to flash interface */
2807 bnx2_enable_nvram_access(bp);
2820 pre_len = 4 - (offset & 3);
2822 if (pre_len >= len32) {
2824 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2825 BNX2_NVM_COMMAND_LAST;
2828 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2831 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2836 memcpy(ret_buf, buf + (offset & 3), pre_len);
2843 extra = 4 - (len32 & 3);
2844 len32 = (len32 + 4) & ~3;
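/* Example: a 5-byte read at offset 0x102 first fetches the dword at
 * 0x100 and keeps its last two bytes (pre_len = 2); the remaining three
 * bytes come from the dword at 0x104, with the single trailing byte
 * (extra = 1) of that dword discarded.  NVRAM is only ever accessed in
 * whole, aligned dwords.
 */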
2851 cmd_flags = BNX2_NVM_COMMAND_LAST;
2853 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2854 BNX2_NVM_COMMAND_LAST;
2856 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2858 memcpy(ret_buf, buf, 4 - extra);
2860 else if (len32 > 0) {
2863 /* Read the first word. */
2867 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2869 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2871 /* Advance to the next dword. */
2876 while (len32 > 4 && rc == 0) {
2877 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2879 /* Advance to the next dword. */
2888 cmd_flags = BNX2_NVM_COMMAND_LAST;
2889 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2891 memcpy(ret_buf, buf, 4 - extra);
2894 /* Disable access to flash interface */
2895 bnx2_disable_nvram_access(bp);
2897 bnx2_release_nvram_lock(bp);
2903 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2906 u32 written, offset32, len32;
2907 u8 *buf, start[4], end[4];
2909 int align_start, align_end;
2914 align_start = align_end = 0;
2916 if ((align_start = (offset32 & 3))) {
2918 len32 += align_start;
2919 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2924 if ((len32 > 4) || !align_start) {
2925 align_end = 4 - (len32 & 3);
2927 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2934 if (align_start || align_end) {
2935 buf = kmalloc(len32, GFP_KERNEL);
2939 memcpy(buf, start, 4);
2942 memcpy(buf + len32 - 4, end, 4);
2944 memcpy(buf + align_start, data_buf, buf_size);
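/* Example: a 3-byte write at offset 0x101 becomes a one-dword write at
 * 0x100: align_start = 1, the original dword is read back into start[],
 * and the scratch buffer keeps its first byte while the caller's three
 * bytes fill the rest, so the flash never sees a partial dword.
 */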
2948 while ((written < len32) && (rc == 0)) {
2949 u32 page_start, page_end, data_start, data_end;
2950 u32 addr, cmd_flags;
2952 u8 flash_buffer[264];
2954 /* Find the page_start addr */
2955 page_start = offset32 + written;
2956 page_start -= (page_start % bp->flash_info->page_size);
2957 /* Find the page_end addr */
2958 page_end = page_start + bp->flash_info->page_size;
2959 /* Find the data_start addr */
2960 data_start = (written == 0) ? offset32 : page_start;
2961 /* Find the data_end addr */
2962 data_end = (page_end > offset32 + len32) ?
2963 (offset32 + len32) : page_end;
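/* Example: with a 256-byte page, a 0x40-byte write starting at 0x1f0
 * spans two pages.  Pass 1: page_start = 0x100, page_end = 0x200,
 * data_start = 0x1f0, data_end = 0x200 (0x10 bytes written).  Pass 2:
 * page_start = 0x200, page_end = 0x300, data_start = 0x200,
 * data_end = 0x230 (the remaining 0x30 bytes).
 */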
2965 /* Request access to the flash interface. */
2966 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2967 goto nvram_write_end;
2969 /* Enable access to flash interface */
2970 bnx2_enable_nvram_access(bp);
2972 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2973 if (bp->flash_info->buffered == 0) {
2976 /* Read the whole page into the buffer
2977 * (non-buffered flash only) */
2978 for (j = 0; j < bp->flash_info->page_size; j += 4) {
2979 if (j == (bp->flash_info->page_size - 4)) {
2980 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2982 rc = bnx2_nvram_read_dword(bp,
2988 goto nvram_write_end;
2994 /* Enable writes to flash interface (unlock write-protect) */
2995 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
2996 goto nvram_write_end;
2998 /* Erase the page */
2999 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3000 goto nvram_write_end;
3002 /* Re-enable writes for the actual write operation */
3003 bnx2_enable_nvram_write(bp);
3005 /* Loop to write back the buffer data from page_start to data_start */
3008 if (bp->flash_info->buffered == 0) {
3009 for (addr = page_start; addr < data_start;
3010 addr += 4, i += 4) {
3012 rc = bnx2_nvram_write_dword(bp, addr,
3013 &flash_buffer[i], cmd_flags);
3016 goto nvram_write_end;
3022 /* Loop to write the new data from data_start to data_end */
3023 for (addr = data_start; addr < data_end; addr += 4, i++) {
3024 if ((addr == page_end - 4) ||
3025 ((bp->flash_info->buffered) &&
3026 (addr == data_end - 4))) {
3028 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3030 rc = bnx2_nvram_write_dword(bp, addr, buf,
3034 goto nvram_write_end;
3040 /* Loop to write back the buffer data from data_end to page_end */
3042 if (bp->flash_info->buffered == 0) {
3043 for (addr = data_end; addr < page_end;
3044 addr += 4, i += 4) {
3046 if (addr == page_end-4) {
3047 cmd_flags = BNX2_NVM_COMMAND_LAST;
3049 rc = bnx2_nvram_write_dword(bp, addr,
3050 &flash_buffer[i], cmd_flags);
3053 goto nvram_write_end;
3059 /* Disable writes to flash interface (lock write-protect) */
3060 bnx2_disable_nvram_write(bp);
3062 /* Disable access to flash interface */
3063 bnx2_disable_nvram_access(bp);
3064 bnx2_release_nvram_lock(bp);
3066 /* Increment written */
3067 written += data_end - data_start;
3071 if (align_start || align_end)
3077 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3082 /* Wait for the current PCI transaction to complete before
3083 * issuing a reset. */
3084 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3085 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3086 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3087 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3088 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3089 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3092 /* Wait for the firmware to tell us it is ok to issue a reset. */
3093 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3095 /* Deposit a driver reset signature so the firmware knows that
3096 * this is a soft reset. */
3097 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3098 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3100 /* Do a dummy read to force the chip to complete all current transactions
3101 * before we issue a reset. */
3102 val = REG_RD(bp, BNX2_MISC_ID);
3104 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3105 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3106 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3109 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3111 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3112 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3115 /* Reset takes approximately 30 usec */
3116 for (i = 0; i < 10; i++) {
3117 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3118 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3119 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3125 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3126 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3127 printk(KERN_ERR PFX "Chip reset did not complete\n");
3131 /* Make sure byte swapping is properly configured. */
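/* BNX2_PCI_SWAP_DIAG0 reads back a known test pattern; seeing
 * 0x01020304 here confirms that the swap settings programmed above
 * present registers to the host in the expected byte order.
 */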
3132 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3133 if (val != 0x01020304) {
3134 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3138 /* Wait for the firmware to finish its initialization. */
3139 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3143 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3144 /* Adjust the voltage regulator two steps lower. The default
3145 * of this register is 0x0000000e. */
3146 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3148 /* Remove bad rbuf memory from the free pool. */
3149 rc = bnx2_alloc_bad_rbuf(bp);
3156 bnx2_init_chip(struct bnx2 *bp)
3161 /* Make sure the interrupt is not active. */
3162 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3164 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3165 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3167 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3169 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3170 DMA_READ_CHANS << 12 |
3171 DMA_WRITE_CHANS << 16;
3173 val |= (0x2 << 20) | (1 << 11);
3175 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3178 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3179 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3180 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3182 REG_WR(bp, BNX2_DMA_CONFIG, val);
3184 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3185 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3186 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3187 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3190 if (bp->flags & PCIX_FLAG) {
3193 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3195 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3196 val16 & ~PCI_X_CMD_ERO);
3199 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3200 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3201 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3202 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3204 /* Initialize context mapping and zero out the quick contexts. The
3205 * context block must have already been enabled. */
3206 bnx2_init_context(bp);
3209 bnx2_init_nvram(bp);
3211 bnx2_set_mac_addr(bp);
3213 val = REG_RD(bp, BNX2_MQ_CONFIG);
3214 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3215 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3216 REG_WR(bp, BNX2_MQ_CONFIG, val);
3218 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3219 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3220 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3222 val = (BCM_PAGE_BITS - 8) << 24;
3223 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3225 /* Configure page size. */
3226 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3227 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3228 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3229 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3231 val = bp->mac_addr[0] +
3232 (bp->mac_addr[1] << 8) +
3233 (bp->mac_addr[2] << 16) +
3235 (bp->mac_addr[4] << 8) +
3236 (bp->mac_addr[5] << 16);
3237 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
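/* Folding the MAC address into the seed makes it unlikely that two
 * NICs on the same segment pick identical collision backoff slots.
 */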
3239 /* Program the MTU. Also include 4 bytes for CRC32. */
3240 val = bp->dev->mtu + ETH_HLEN + 4;
3241 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3242 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3243 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
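/* Example: with the default 1500-byte MTU the programmed size is
 * 1500 + 14 + 4 = 1518; assuming the standard 1514-byte maximum packet
 * size, only sizes above 1518 set the jumbo-enable bit.
 */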
3245 bp->last_status_idx = 0;
3246 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3248 /* Set up how to generate a link change interrupt. */
3249 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3251 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3252 (u64) bp->status_blk_mapping & 0xffffffff);
3253 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3255 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3256 (u64) bp->stats_blk_mapping & 0xffffffff);
3257 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3258 (u64) bp->stats_blk_mapping >> 32);
3260 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3261 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3263 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3264 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3266 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3267 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3269 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3271 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3273 REG_WR(bp, BNX2_HC_COM_TICKS,
3274 (bp->com_ticks_int << 16) | bp->com_ticks);
3276 REG_WR(bp, BNX2_HC_CMD_TICKS,
3277 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3279 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3280 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3282 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3283 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3285 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3286 BNX2_HC_CONFIG_TX_TMR_MODE |
3287 BNX2_HC_CONFIG_COLLECT_STATS);
3290 /* Clear internal stats counters. */
3291 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3293 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3295 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3296 BNX2_PORT_FEATURE_ASF_ENABLED)
3297 bp->flags |= ASF_ENABLE_FLAG;
3299 /* Initialize the receive filter. */
3300 bnx2_set_rx_mode(bp->dev);
3302 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3305 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3306 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3310 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3317 bnx2_init_tx_ring(struct bnx2 *bp)
3322 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3324 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3325 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3330 bp->tx_prod_bseq = 0;
3332 val = BNX2_L2CTX_TYPE_TYPE_L2;
3333 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3334 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3336 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3338 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3340 val = (u64) bp->tx_desc_mapping >> 32;
3341 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3343 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3344 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3348 bnx2_init_rx_ring(struct bnx2 *bp)
3352 u16 prod, ring_prod;
3355 /* 8 for CRC and VLAN */
3356 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3357 /* 8 for alignment */
3358 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3360 ring_prod = prod = bp->rx_prod = 0;
3363 bp->rx_prod_bseq = 0;
3365 for (i = 0; i < bp->rx_max_ring; i++) {
3368 rxbd = &bp->rx_desc_ring[i][0];
3369 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3370 rxbd->rx_bd_len = bp->rx_buf_use_size;
3371 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3373 if (i == (bp->rx_max_ring - 1))
3377 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3378 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3382 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3383 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3385 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3387 val = (u64) bp->rx_desc_mapping[0] >> 32;
3388 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3390 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3391 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3393 for (i = 0; i < bp->rx_ring_size; i++) {
3394 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3397 prod = NEXT_RX_BD(prod);
3398 ring_prod = RX_RING_IDX(prod);
3402 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3404 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3408 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3412 bp->rx_ring_size = size;
3414 while (size > MAX_RX_DESC_CNT) {
3415 size -= MAX_RX_DESC_CNT;
3418 /* round to next power of 2 */
3420 while ((max & num_rings) == 0)
3423 if (num_rings != max)
3426 bp->rx_max_ring = max;
3427 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
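/* Example (assuming 256 descriptors per ring page, the last of which
 * chains to the next page): a requested size of 600 needs three pages,
 * which is rounded up to rx_max_ring = 4 and rx_max_ring_idx = 1023.
 */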
3431 bnx2_free_tx_skbs(struct bnx2 *bp)
3435 if (bp->tx_buf_ring == NULL)
3438 for (i = 0; i < TX_DESC_CNT; ) {
3439 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3440 struct sk_buff *skb = tx_buf->skb;
3448 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3449 skb_headlen(skb), PCI_DMA_TODEVICE);
3453 last = skb_shinfo(skb)->nr_frags;
3454 for (j = 0; j < last; j++) {
3455 tx_buf = &bp->tx_buf_ring[i + j + 1];
3456 pci_unmap_page(bp->pdev,
3457 pci_unmap_addr(tx_buf, mapping),
3458 skb_shinfo(skb)->frags[j].size,
3461 dev_kfree_skb_any(skb);
3468 bnx2_free_rx_skbs(struct bnx2 *bp)
3472 if (bp->rx_buf_ring == NULL)
3475 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3476 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3477 struct sk_buff *skb = rx_buf->skb;
3482 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3483 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3487 dev_kfree_skb_any(skb);
3492 bnx2_free_skbs(struct bnx2 *bp)
3494 bnx2_free_tx_skbs(bp);
3495 bnx2_free_rx_skbs(bp);
3499 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3503 rc = bnx2_reset_chip(bp, reset_code);
3509 bnx2_init_tx_ring(bp);
3510 bnx2_init_rx_ring(bp);
3515 bnx2_init_nic(struct bnx2 *bp)
3519 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3528 bnx2_test_registers(struct bnx2 *bp)
3532 static const struct {
3538 { 0x006c, 0, 0x00000000, 0x0000003f },
3539 { 0x0090, 0, 0xffffffff, 0x00000000 },
3540 { 0x0094, 0, 0x00000000, 0x00000000 },
3542 { 0x0404, 0, 0x00003f00, 0x00000000 },
3543 { 0x0418, 0, 0x00000000, 0xffffffff },
3544 { 0x041c, 0, 0x00000000, 0xffffffff },
3545 { 0x0420, 0, 0x00000000, 0x80ffffff },
3546 { 0x0424, 0, 0x00000000, 0x00000000 },
3547 { 0x0428, 0, 0x00000000, 0x00000001 },
3548 { 0x0450, 0, 0x00000000, 0x0000ffff },
3549 { 0x0454, 0, 0x00000000, 0xffffffff },
3550 { 0x0458, 0, 0x00000000, 0xffffffff },
3552 { 0x0808, 0, 0x00000000, 0xffffffff },
3553 { 0x0854, 0, 0x00000000, 0xffffffff },
3554 { 0x0868, 0, 0x00000000, 0x77777777 },
3555 { 0x086c, 0, 0x00000000, 0x77777777 },
3556 { 0x0870, 0, 0x00000000, 0x77777777 },
3557 { 0x0874, 0, 0x00000000, 0x77777777 },
3559 { 0x0c00, 0, 0x00000000, 0x00000001 },
3560 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3561 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3563 { 0x1000, 0, 0x00000000, 0x00000001 },
3564 { 0x1004, 0, 0x00000000, 0x000f0001 },
3566 { 0x1408, 0, 0x01c00800, 0x00000000 },
3567 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3568 { 0x14a8, 0, 0x00000000, 0x000001ff },
3569 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3570 { 0x14b0, 0, 0x00000002, 0x00000001 },
3571 { 0x14b8, 0, 0x00000000, 0x00000000 },
3572 { 0x14c0, 0, 0x00000000, 0x00000009 },
3573 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3574 { 0x14cc, 0, 0x00000000, 0x00000001 },
3575 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3577 { 0x1800, 0, 0x00000000, 0x00000001 },
3578 { 0x1804, 0, 0x00000000, 0x00000003 },
3580 { 0x2800, 0, 0x00000000, 0x00000001 },
3581 { 0x2804, 0, 0x00000000, 0x00003f01 },
3582 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3583 { 0x2810, 0, 0xffff0000, 0x00000000 },
3584 { 0x2814, 0, 0xffff0000, 0x00000000 },
3585 { 0x2818, 0, 0xffff0000, 0x00000000 },
3586 { 0x281c, 0, 0xffff0000, 0x00000000 },
3587 { 0x2834, 0, 0xffffffff, 0x00000000 },
3588 { 0x2840, 0, 0x00000000, 0xffffffff },
3589 { 0x2844, 0, 0x00000000, 0xffffffff },
3590 { 0x2848, 0, 0xffffffff, 0x00000000 },
3591 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3593 { 0x2c00, 0, 0x00000000, 0x00000011 },
3594 { 0x2c04, 0, 0x00000000, 0x00030007 },
3596 { 0x3c00, 0, 0x00000000, 0x00000001 },
3597 { 0x3c04, 0, 0x00000000, 0x00070000 },
3598 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3599 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3600 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3601 { 0x3c14, 0, 0x00000000, 0xffffffff },
3602 { 0x3c18, 0, 0x00000000, 0xffffffff },
3603 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3604 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3606 { 0x5004, 0, 0x00000000, 0x0000007f },
3607 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3608 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3610 { 0x5c00, 0, 0x00000000, 0x00000001 },
3611 { 0x5c04, 0, 0x00000000, 0x0003000f },
3612 { 0x5c08, 0, 0x00000003, 0x00000000 },
3613 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3614 { 0x5c10, 0, 0x00000000, 0xffffffff },
3615 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3616 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3617 { 0x5c88, 0, 0x00000000, 0x00077373 },
3618 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3620 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3621 { 0x680c, 0, 0xffffffff, 0x00000000 },
3622 { 0x6810, 0, 0xffffffff, 0x00000000 },
3623 { 0x6814, 0, 0xffffffff, 0x00000000 },
3624 { 0x6818, 0, 0xffffffff, 0x00000000 },
3625 { 0x681c, 0, 0xffffffff, 0x00000000 },
3626 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3627 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3628 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3629 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3630 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3631 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3632 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3633 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3634 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3635 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3636 { 0x684c, 0, 0xffffffff, 0x00000000 },
3637 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3638 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3639 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3640 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3641 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3642 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3644 { 0xffff, 0, 0x00000000, 0x00000000 },
3648 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3649 u32 offset, rw_mask, ro_mask, save_val, val;
3651 offset = (u32) reg_tbl[i].offset;
3652 rw_mask = reg_tbl[i].rw_mask;
3653 ro_mask = reg_tbl[i].ro_mask;
3655 save_val = readl(bp->regview + offset);
3657 writel(0, bp->regview + offset);
3659 val = readl(bp->regview + offset);
3660 if ((val & rw_mask) != 0) {
3664 if ((val & ro_mask) != (save_val & ro_mask)) {
3668 writel(0xffffffff, bp->regview + offset);
3670 val = readl(bp->regview + offset);
3671 if ((val & rw_mask) != rw_mask) {
3675 if ((val & ro_mask) != (save_val & ro_mask)) {
3679 writel(save_val, bp->regview + offset);
3683 writel(save_val, bp->regview + offset);
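/* Each register is probed with all-zeros and all-ones: read/write bits
 * (rw_mask) must take the written value, read-only bits (ro_mask) must
 * keep the value saved before the test, and the original contents are
 * restored afterwards.
 */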
3691 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3693 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3694 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3697 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3700 for (offset = 0; offset < size; offset += 4) {
3702 REG_WR_IND(bp, start + offset, test_pattern[i]);
3704 if (REG_RD_IND(bp, start + offset) !=
3714 bnx2_test_memory(struct bnx2 *bp)
3718 static const struct {
3722 { 0x60000, 0x4000 },
3723 { 0xa0000, 0x3000 },
3724 { 0xe0000, 0x4000 },
3725 { 0x120000, 0x4000 },
3726 { 0x1a0000, 0x4000 },
3727 { 0x160000, 0x4000 },
3731 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3732 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3733 mem_tbl[i].len)) != 0) {
3741 #define BNX2_MAC_LOOPBACK 0
3742 #define BNX2_PHY_LOOPBACK 1
3745 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3747 unsigned int pkt_size, num_pkts, i;
3748 struct sk_buff *skb, *rx_skb;
3749 unsigned char *packet;
3750 u16 rx_start_idx, rx_idx;
3753 struct sw_bd *rx_buf;
3754 struct l2_fhdr *rx_hdr;
3757 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3758 bp->loopback = MAC_LOOPBACK;
3759 bnx2_set_mac_loopback(bp);
3761 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3763 bnx2_set_phy_loopback(bp);
3769 skb = dev_alloc_skb(pkt_size);
3772 packet = skb_put(skb, pkt_size);
3773 memcpy(packet, bp->mac_addr, 6);
3774 memset(packet + 6, 0x0, 8);
3775 for (i = 14; i < pkt_size; i++)
3776 packet[i] = (unsigned char) (i & 0xff);
3778 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3781 REG_WR(bp, BNX2_HC_COMMAND,
3782 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3784 REG_RD(bp, BNX2_HC_COMMAND);
3787 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3791 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3793 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3794 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3795 txbd->tx_bd_mss_nbytes = pkt_size;
3796 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3799 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3800 bp->tx_prod_bseq += pkt_size;
3802 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3803 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3807 REG_WR(bp, BNX2_HC_COMMAND,
3808 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3810 REG_RD(bp, BNX2_HC_COMMAND);
3814 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3815 dev_kfree_skb_irq(skb);
3817 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
3818 goto loopback_test_done;
3821 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3822 if (rx_idx != rx_start_idx + num_pkts) {
3823 goto loopback_test_done;
3826 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3827 rx_skb = rx_buf->skb;
3829 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3830 skb_reserve(rx_skb, bp->rx_offset);
3832 pci_dma_sync_single_for_cpu(bp->pdev,
3833 pci_unmap_addr(rx_buf, mapping),
3834 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3836 if (rx_hdr->l2_fhdr_status &
3837 (L2_FHDR_ERRORS_BAD_CRC |
3838 L2_FHDR_ERRORS_PHY_DECODE |
3839 L2_FHDR_ERRORS_ALIGNMENT |
3840 L2_FHDR_ERRORS_TOO_SHORT |
3841 L2_FHDR_ERRORS_GIANT_FRAME)) {
3843 goto loopback_test_done;
3846 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3847 goto loopback_test_done;
3850 for (i = 14; i < pkt_size; i++) {
3851 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3852 goto loopback_test_done;
3863 #define BNX2_MAC_LOOPBACK_FAILED 1
3864 #define BNX2_PHY_LOOPBACK_FAILED 2
3865 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3866 BNX2_PHY_LOOPBACK_FAILED)
3869 bnx2_test_loopback(struct bnx2 *bp)
3873 if (!netif_running(bp->dev))
3874 return BNX2_LOOPBACK_FAILED;
3876 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3877 spin_lock_bh(&bp->phy_lock);
3879 spin_unlock_bh(&bp->phy_lock);
3880 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3881 rc |= BNX2_MAC_LOOPBACK_FAILED;
3882 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3883 rc |= BNX2_PHY_LOOPBACK_FAILED;
3887 #define NVRAM_SIZE 0x200
3888 #define CRC32_RESIDUAL 0xdebb20e3
3891 bnx2_test_nvram(struct bnx2 *bp)
3893 u32 buf[NVRAM_SIZE / 4];
3894 u8 *data = (u8 *) buf;
3898 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3899 goto test_nvram_done;
3901 magic = be32_to_cpu(buf[0]);
3902 if (magic != 0x669955aa) {
3904 goto test_nvram_done;
3907 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3908 goto test_nvram_done;
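/* Each 0x100-byte block is checked by running CRC-32 over the block
 * including its stored checksum: data followed by its own appended CRC
 * always reduces to the fixed residual 0xdebb20e3, so any other result
 * indicates corruption.
 */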
3910 csum = ether_crc_le(0x100, data);
3911 if (csum != CRC32_RESIDUAL) {
3913 goto test_nvram_done;
3916 csum = ether_crc_le(0x100, data + 0x100);
3917 if (csum != CRC32_RESIDUAL) {
3926 bnx2_test_link(struct bnx2 *bp)
3930 spin_lock_bh(&bp->phy_lock);
3931 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3932 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3933 spin_unlock_bh(&bp->phy_lock);
3935 if (bmsr & BMSR_LSTATUS) {
3942 bnx2_test_intr(struct bnx2 *bp)
3947 if (!netif_running(bp->dev))
3950 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3952 /* This register is not touched during run-time. */
3953 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
3954 REG_RD(bp, BNX2_HC_COMMAND);
3956 for (i = 0; i < 10; i++) {
3957 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3963 msleep_interruptible(10);
3972 bnx2_timer(unsigned long data)
3974 struct bnx2 *bp = (struct bnx2 *) data;
3977 if (!netif_running(bp->dev))
3980 if (atomic_read(&bp->intr_sem) != 0)
3981 goto bnx2_restart_timer;
3983 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
3984 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
3986 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3987 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3989 spin_lock(&bp->phy_lock);
3990 if (bp->serdes_an_pending) {
3991 bp->serdes_an_pending--;
3993 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
3996 bp->current_interval = bp->timer_interval;
3998 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4000 if (bmcr & BMCR_ANENABLE) {
4003 bnx2_write_phy(bp, 0x1c, 0x7c00);
4004 bnx2_read_phy(bp, 0x1c, &phy1);
4006 bnx2_write_phy(bp, 0x17, 0x0f01);
4007 bnx2_read_phy(bp, 0x15, &phy2);
4008 bnx2_write_phy(bp, 0x17, 0x0f01);
4009 bnx2_read_phy(bp, 0x15, &phy2);
4011 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4012 !(phy2 & 0x20)) { /* no CONFIG */
4014 bmcr &= ~BMCR_ANENABLE;
4015 bmcr |= BMCR_SPEED1000 |
4017 bnx2_write_phy(bp, MII_BMCR, bmcr);
4019 PHY_PARALLEL_DETECT_FLAG;
4023 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4024 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4027 bnx2_write_phy(bp, 0x17, 0x0f01);
4028 bnx2_read_phy(bp, 0x15, &phy2);
4032 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4033 bmcr |= BMCR_ANENABLE;
4034 bnx2_write_phy(bp, MII_BMCR, bmcr);
4036 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4041 bp->current_interval = bp->timer_interval;
4043 spin_unlock(&bp->phy_lock);
4047 mod_timer(&bp->timer, jiffies + bp->current_interval);
4050 /* Called with rtnl_lock */
4052 bnx2_open(struct net_device *dev)
4054 struct bnx2 *bp = netdev_priv(dev);
4057 bnx2_set_power_state(bp, PCI_D0);
4058 bnx2_disable_int(bp);
4060 rc = bnx2_alloc_mem(bp);
4064 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4065 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4068 if (pci_enable_msi(bp->pdev) == 0) {
4069 bp->flags |= USING_MSI_FLAG;
4070 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4074 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4075 SA_SHIRQ, dev->name, dev);
4079 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4087 rc = bnx2_init_nic(bp);
4090 free_irq(bp->pdev->irq, dev);
4091 if (bp->flags & USING_MSI_FLAG) {
4092 pci_disable_msi(bp->pdev);
4093 bp->flags &= ~USING_MSI_FLAG;
4100 mod_timer(&bp->timer, jiffies + bp->current_interval);
4102 atomic_set(&bp->intr_sem, 0);
4104 bnx2_enable_int(bp);
4106 if (bp->flags & USING_MSI_FLAG) {
4107 /* Test MSI to make sure it is working
4108 * If MSI test fails, go back to INTx mode
4110 if (bnx2_test_intr(bp) != 0) {
4111 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4112 " using MSI, switching to INTx mode. Please"
4113 " report this failure to the PCI maintainer"
4114 " and include system chipset information.\n",
4117 bnx2_disable_int(bp);
4118 free_irq(bp->pdev->irq, dev);
4119 pci_disable_msi(bp->pdev);
4120 bp->flags &= ~USING_MSI_FLAG;
4122 rc = bnx2_init_nic(bp);
4125 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4126 SA_SHIRQ, dev->name, dev);
4131 del_timer_sync(&bp->timer);
4134 bnx2_enable_int(bp);
4137 if (bp->flags & USING_MSI_FLAG) {
4138 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4141 netif_start_queue(dev);
4147 bnx2_reset_task(void *data)
4149 struct bnx2 *bp = data;
4151 if (!netif_running(bp->dev))
4154 bp->in_reset_task = 1;
4155 bnx2_netif_stop(bp);
4159 atomic_set(&bp->intr_sem, 1);
4160 bnx2_netif_start(bp);
4161 bp->in_reset_task = 0;
4165 bnx2_tx_timeout(struct net_device *dev)
4167 struct bnx2 *bp = netdev_priv(dev);
4169 /* This allows the netif to be shut down gracefully before resetting */
4170 schedule_work(&bp->reset_task);
4174 /* Called with rtnl_lock */
4176 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4178 struct bnx2 *bp = netdev_priv(dev);
4180 bnx2_netif_stop(bp);
4183 bnx2_set_rx_mode(dev);
4185 bnx2_netif_start(bp);
4188 /* Called with rtnl_lock */
4190 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4192 struct bnx2 *bp = netdev_priv(dev);
4194 bnx2_netif_stop(bp);
4197 bp->vlgrp->vlan_devices[vid] = NULL;
4198 bnx2_set_rx_mode(dev);
4200 bnx2_netif_start(bp);
4204 /* Called with dev->xmit_lock.
4205 * hard_start_xmit is pseudo-lockless - a lock is only required when
4206 * the tx queue is full. This way, we get the benefit of lockless
4207 * operations most of the time without the complexity of handling
4208 * netif_stop_queue/wake_queue race conditions.
4211 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4213 struct bnx2 *bp = netdev_priv(dev);
4216 struct sw_bd *tx_buf;
4217 u32 len, vlan_tag_flags, last_frag, mss;
4218 u16 prod, ring_prod;
4221 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4222 netif_stop_queue(dev);
4223 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4226 return NETDEV_TX_BUSY;
4228 len = skb_headlen(skb);
4230 ring_prod = TX_RING_IDX(prod);
4233 if (skb->ip_summed == CHECKSUM_HW) {
4234 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4237 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4239 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4242 if ((mss = skb_shinfo(skb)->tso_size) &&
4243 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4244 u32 tcp_opt_len, ip_tcp_len;
4246 if (skb_header_cloned(skb) &&
4247 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4249 return NETDEV_TX_OK;
4252 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4253 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4256 if (skb->h.th->doff > 5) {
4257 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4259 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4261 skb->nh.iph->check = 0;
4262 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4264 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4268 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4269 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4270 (tcp_opt_len >> 2)) << 8;
4279 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4281 tx_buf = &bp->tx_buf_ring[ring_prod];
4283 pci_unmap_addr_set(tx_buf, mapping, mapping);
4285 txbd = &bp->tx_desc_ring[ring_prod];
4287 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4288 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4289 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4290 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4292 last_frag = skb_shinfo(skb)->nr_frags;
4294 for (i = 0; i < last_frag; i++) {
4295 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4297 prod = NEXT_TX_BD(prod);
4298 ring_prod = TX_RING_IDX(prod);
4299 txbd = &bp->tx_desc_ring[ring_prod];
4302 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4303 len, PCI_DMA_TODEVICE);
4304 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4307 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4308 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4309 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4310 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4313 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4315 prod = NEXT_TX_BD(prod);
4316 bp->tx_prod_bseq += skb->len;
4318 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4319 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4324 dev->trans_start = jiffies;
4326 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4327 spin_lock(&bp->tx_lock);
4328 netif_stop_queue(dev);
4330 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4331 netif_wake_queue(dev);
4332 spin_unlock(&bp->tx_lock);
4335 return NETDEV_TX_OK;
4338 /* Called with rtnl_lock */
4340 bnx2_close(struct net_device *dev)
4342 struct bnx2 *bp = netdev_priv(dev);
4345 /* Calling flush_scheduled_work() may deadlock because
4346 * linkwatch_event() may be on the workqueue and it will try to get
4347 * the rtnl_lock which we are holding.
4349 while (bp->in_reset_task)
4352 bnx2_netif_stop(bp);
4353 del_timer_sync(&bp->timer);
4354 if (bp->flags & NO_WOL_FLAG)
4355 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
4357 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4359 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4360 bnx2_reset_chip(bp, reset_code);
4361 free_irq(bp->pdev->irq, dev);
4362 if (bp->flags & USING_MSI_FLAG) {
4363 pci_disable_msi(bp->pdev);
4364 bp->flags &= ~USING_MSI_FLAG;
4369 netif_carrier_off(bp->dev);
4370 bnx2_set_power_state(bp, PCI_D3hot);
4374 #define GET_NET_STATS64(ctr) \
4375 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4376 (unsigned long) (ctr##_lo)
4378 #define GET_NET_STATS32(ctr) \
4381 #if (BITS_PER_LONG == 64)
4382 #define GET_NET_STATS GET_NET_STATS64
4384 #define GET_NET_STATS GET_NET_STATS32
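/* Hardware counters are kept as _hi/_lo 32-bit pairs.  Example: hi = 1,
 * lo = 2 reads back as 0x100000002 on a 64-bit host; 32-bit hosts use
 * only the low word, so large counters can wrap there.
 */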
4387 static struct net_device_stats *
4388 bnx2_get_stats(struct net_device *dev)
4390 struct bnx2 *bp = netdev_priv(dev);
4391 struct statistics_block *stats_blk = bp->stats_blk;
4392 struct net_device_stats *net_stats = &bp->net_stats;
4394 if (bp->stats_blk == NULL) {
4397 net_stats->rx_packets =
4398 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4399 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4400 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4402 net_stats->tx_packets =
4403 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4404 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4405 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4407 net_stats->rx_bytes =
4408 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4410 net_stats->tx_bytes =
4411 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4413 net_stats->multicast =
4414 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4416 net_stats->collisions =
4417 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4419 net_stats->rx_length_errors =
4420 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4421 stats_blk->stat_EtherStatsOverrsizePkts);
4423 net_stats->rx_over_errors =
4424 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4426 net_stats->rx_frame_errors =
4427 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4429 net_stats->rx_crc_errors =
4430 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4432 net_stats->rx_errors = net_stats->rx_length_errors +
4433 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4434 net_stats->rx_crc_errors;
4436 net_stats->tx_aborted_errors =
4437 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4438 stats_blk->stat_Dot3StatsLateCollisions);
4440 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4441 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4442 net_stats->tx_carrier_errors = 0;
4444 net_stats->tx_carrier_errors =
4446 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4449 net_stats->tx_errors =
4451 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4453 net_stats->tx_aborted_errors +
4454 net_stats->tx_carrier_errors;
4459 /* All ethtool functions called with rtnl_lock */
4462 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4464 struct bnx2 *bp = netdev_priv(dev);
4466 cmd->supported = SUPPORTED_Autoneg;
4467 if (bp->phy_flags & PHY_SERDES_FLAG) {
4468 cmd->supported |= SUPPORTED_1000baseT_Full |
4471 cmd->port = PORT_FIBRE;
4474 cmd->supported |= SUPPORTED_10baseT_Half |
4475 SUPPORTED_10baseT_Full |
4476 SUPPORTED_100baseT_Half |
4477 SUPPORTED_100baseT_Full |
4478 SUPPORTED_1000baseT_Full |
4481 cmd->port = PORT_TP;
4484 cmd->advertising = bp->advertising;
4486 if (bp->autoneg & AUTONEG_SPEED) {
4487 cmd->autoneg = AUTONEG_ENABLE;
4490 cmd->autoneg = AUTONEG_DISABLE;
4493 if (netif_carrier_ok(dev)) {
4494 cmd->speed = bp->line_speed;
4495 cmd->duplex = bp->duplex;
4502 cmd->transceiver = XCVR_INTERNAL;
4503 cmd->phy_address = bp->phy_addr;
4509 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4511 struct bnx2 *bp = netdev_priv(dev);
4512 u8 autoneg = bp->autoneg;
4513 u8 req_duplex = bp->req_duplex;
4514 u16 req_line_speed = bp->req_line_speed;
4515 u32 advertising = bp->advertising;
4517 if (cmd->autoneg == AUTONEG_ENABLE) {
4518 autoneg |= AUTONEG_SPEED;
4520 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4522 /* allow advertising a single speed */
4523 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4524 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4525 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4526 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4528 if (bp->phy_flags & PHY_SERDES_FLAG)
4531 advertising = cmd->advertising;
4534 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4535 advertising = cmd->advertising;
4537 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4541 if (bp->phy_flags & PHY_SERDES_FLAG) {
4542 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4545 advertising = ETHTOOL_ALL_COPPER_SPEED;
4548 advertising |= ADVERTISED_Autoneg;
4551 if (bp->phy_flags & PHY_SERDES_FLAG) {
4552 if ((cmd->speed != SPEED_1000) ||
4553 (cmd->duplex != DUPLEX_FULL)) {
4557 else if (cmd->speed == SPEED_1000) {
4560 autoneg &= ~AUTONEG_SPEED;
4561 req_line_speed = cmd->speed;
4562 req_duplex = cmd->duplex;
4566 bp->autoneg = autoneg;
4567 bp->advertising = advertising;
4568 bp->req_line_speed = req_line_speed;
4569 bp->req_duplex = req_duplex;
4571 spin_lock_bh(&bp->phy_lock);
4575 spin_unlock_bh(&bp->phy_lock);
4581 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4583 struct bnx2 *bp = netdev_priv(dev);
4585 strcpy(info->driver, DRV_MODULE_NAME);
4586 strcpy(info->version, DRV_MODULE_VERSION);
4587 strcpy(info->bus_info, pci_name(bp->pdev));
4588 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4589 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4590 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4591 info->fw_version[1] = info->fw_version[3] = '.';
4592 info->fw_version[5] = 0;
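/* bp->fw_ver packs the major, minor and fix numbers one per byte, most
 * significant first, so e.g. 0x01020300 is reported as "1.2.3" (each
 * byte is simply offset by '0', which assumes single-digit components).
 */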
4595 #define BNX2_REGDUMP_LEN (32 * 1024)
4598 bnx2_get_regs_len(struct net_device *dev)
4600 return BNX2_REGDUMP_LEN;
4604 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4606 u32 *p = _p, i, offset;
4608 struct bnx2 *bp = netdev_priv(dev);
4609 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4610 0x0800, 0x0880, 0x0c00, 0x0c10,
4611 0x0c30, 0x0d08, 0x1000, 0x101c,
4612 0x1040, 0x1048, 0x1080, 0x10a4,
4613 0x1400, 0x1490, 0x1498, 0x14f0,
4614 0x1500, 0x155c, 0x1580, 0x15dc,
4615 0x1600, 0x1658, 0x1680, 0x16d8,
4616 0x1800, 0x1820, 0x1840, 0x1854,
4617 0x1880, 0x1894, 0x1900, 0x1984,
4618 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4619 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4620 0x2000, 0x2030, 0x23c0, 0x2400,
4621 0x2800, 0x2820, 0x2830, 0x2850,
4622 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4623 0x3c00, 0x3c94, 0x4000, 0x4010,
4624 0x4080, 0x4090, 0x43c0, 0x4458,
4625 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4626 0x4fc0, 0x5010, 0x53c0, 0x5444,
4627 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4628 0x5fc0, 0x6000, 0x6400, 0x6428,
4629 0x6800, 0x6848, 0x684c, 0x6860,
4630 0x6888, 0x6910, 0x8000 };
4634 memset(p, 0, BNX2_REGDUMP_LEN);
4636 if (!netif_running(bp->dev))
4640 offset = reg_boundaries[0];
4642 while (offset < BNX2_REGDUMP_LEN) {
4643 *p++ = REG_RD(bp, offset);
4645 if (offset == reg_boundaries[i + 1]) {
4646 offset = reg_boundaries[i + 2];
4647 p = (u32 *) (orig_p + offset);
4654 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4656 struct bnx2 *bp = netdev_priv(dev);
4658 if (bp->flags & NO_WOL_FLAG) {
4663 wol->supported = WAKE_MAGIC;
4665 wol->wolopts = WAKE_MAGIC;
4669 memset(&wol->sopass, 0, sizeof(wol->sopass));
4673 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4675 struct bnx2 *bp = netdev_priv(dev);
4677 if (wol->wolopts & ~WAKE_MAGIC)
4680 if (wol->wolopts & WAKE_MAGIC) {
4681 if (bp->flags & NO_WOL_FLAG)
4693 bnx2_nway_reset(struct net_device *dev)
4695 struct bnx2 *bp = netdev_priv(dev);
4698 if (!(bp->autoneg & AUTONEG_SPEED)) {
4702 spin_lock_bh(&bp->phy_lock);
4704 /* Force a link down visible on the other side */
4705 if (bp->phy_flags & PHY_SERDES_FLAG) {
4706 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4707 spin_unlock_bh(&bp->phy_lock);
4711 spin_lock_bh(&bp->phy_lock);
4712 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4713 bp->current_interval = SERDES_AN_TIMEOUT;
4714 bp->serdes_an_pending = 1;
4715 mod_timer(&bp->timer, jiffies + bp->current_interval);
4719 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4720 bmcr &= ~BMCR_LOOPBACK;
4721 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4723 spin_unlock_bh(&bp->phy_lock);
4729 bnx2_get_eeprom_len(struct net_device *dev)
4731 struct bnx2 *bp = netdev_priv(dev);
4733 if (bp->flash_info == NULL)
4736 return (int) bp->flash_size;
4740 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4743 struct bnx2 *bp = netdev_priv(dev);
4746 /* parameters already validated in ethtool_get_eeprom */
4748 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4754 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4757 struct bnx2 *bp = netdev_priv(dev);
4760 /* parameters already validated in ethtool_set_eeprom */
4762 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4768 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4770 struct bnx2 *bp = netdev_priv(dev);
4772 memset(coal, 0, sizeof(struct ethtool_coalesce));
4774 coal->rx_coalesce_usecs = bp->rx_ticks;
4775 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4776 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4777 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4779 coal->tx_coalesce_usecs = bp->tx_ticks;
4780 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4781 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4782 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4784 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4790 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4792 struct bnx2 *bp = netdev_priv(dev);
4794 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4795 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4797 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4798 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4800 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4801 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4803 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4804 if (bp->rx_quick_cons_trip_int > 0xff)
4805 bp->rx_quick_cons_trip_int = 0xff;
4807 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4808 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4810 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4811 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4813 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4814 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4816 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4817 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4820 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4821 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4822 bp->stats_ticks &= 0xffff00;
4824 if (netif_running(bp->dev)) {
4825 bnx2_netif_stop(bp);
4827 bnx2_netif_start(bp);
4834 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4836 struct bnx2 *bp = netdev_priv(dev);
4838 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4839 ering->rx_mini_max_pending = 0;
4840 ering->rx_jumbo_max_pending = 0;
4842 ering->rx_pending = bp->rx_ring_size;
4843 ering->rx_mini_pending = 0;
4844 ering->rx_jumbo_pending = 0;
4846 ering->tx_max_pending = MAX_TX_DESC_CNT;
4847 ering->tx_pending = bp->tx_ring_size;
4851 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4853 struct bnx2 *bp = netdev_priv(dev);
4855 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
4856 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4857 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4861 if (netif_running(bp->dev)) {
4862 bnx2_netif_stop(bp);
4863 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4868 bnx2_set_rx_ring_size(bp, ering->rx_pending);
4869 bp->tx_ring_size = ering->tx_pending;
4871 if (netif_running(bp->dev)) {
4874 rc = bnx2_alloc_mem(bp);
4878 bnx2_netif_start(bp);
4885 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4887 struct bnx2 *bp = netdev_priv(dev);
4889 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4890 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4891 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4895 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4897 struct bnx2 *bp = netdev_priv(dev);
4899 bp->req_flow_ctrl = 0;
4900 if (epause->rx_pause)
4901 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4902 if (epause->tx_pause)
4903 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4905 if (epause->autoneg) {
4906 bp->autoneg |= AUTONEG_FLOW_CTRL;
4909 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4912 spin_lock_bh(&bp->phy_lock);
4916 spin_unlock_bh(&bp->phy_lock);
4922 bnx2_get_rx_csum(struct net_device *dev)
4924 struct bnx2 *bp = netdev_priv(dev);
4930 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4932 struct bnx2 *bp = netdev_priv(dev);
4938 #define BNX2_NUM_STATS 45
4941 char string[ETH_GSTRING_LEN];
4942 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4944 { "rx_error_bytes" },
4946 { "tx_error_bytes" },
4947 { "rx_ucast_packets" },
4948 { "rx_mcast_packets" },
4949 { "rx_bcast_packets" },
4950 { "tx_ucast_packets" },
4951 { "tx_mcast_packets" },
4952 { "tx_bcast_packets" },
4953 { "tx_mac_errors" },
4954 { "tx_carrier_errors" },
4955 { "rx_crc_errors" },
4956 { "rx_align_errors" },
4957 { "tx_single_collisions" },
4958 { "tx_multi_collisions" },
4960 { "tx_excess_collisions" },
4961 { "tx_late_collisions" },
4962 { "tx_total_collisions" },
4965 { "rx_undersize_packets" },
4966 { "rx_oversize_packets" },
4967 { "rx_64_byte_packets" },
4968 { "rx_65_to_127_byte_packets" },
4969 { "rx_128_to_255_byte_packets" },
4970 { "rx_256_to_511_byte_packets" },
4971 { "rx_512_to_1023_byte_packets" },
4972 { "rx_1024_to_1522_byte_packets" },
4973 { "rx_1523_to_9022_byte_packets" },
4974 { "tx_64_byte_packets" },
4975 { "tx_65_to_127_byte_packets" },
4976 { "tx_128_to_255_byte_packets" },
4977 { "tx_256_to_511_byte_packets" },
4978 { "tx_512_to_1023_byte_packets" },
4979 { "tx_1024_to_1522_byte_packets" },
4980 { "tx_1523_to_9022_byte_packets" },
4981 { "rx_xon_frames" },
4982 { "rx_xoff_frames" },
4983 { "tx_xon_frames" },
4984 { "tx_xoff_frames" },
4985 { "rx_mac_ctrl_frames" },
4986 { "rx_filtered_packets" },
4990 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
4992 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4993 STATS_OFFSET32(stat_IfHCInOctets_hi),
4994 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
4995 STATS_OFFSET32(stat_IfHCOutOctets_hi),
4996 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
4997 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
4998 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
4999 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5000 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5001 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5002 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5003 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5004 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5005 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5006 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5007 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5008 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5009 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5010 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5011 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5012 STATS_OFFSET32(stat_EtherStatsCollisions),
5013 STATS_OFFSET32(stat_EtherStatsFragments),
5014 STATS_OFFSET32(stat_EtherStatsJabbers),
5015 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5016 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5017 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5018 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5019 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5020 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5021 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5022 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5023 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5024 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5025 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5026 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5027 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5028 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5029 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5030 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5031 STATS_OFFSET32(stat_XonPauseFramesReceived),
5032 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5033 STATS_OFFSET32(stat_OutXonSent),
5034 STATS_OFFSET32(stat_OutXoffSent),
5035 STATS_OFFSET32(stat_MacControlFramesReceived),
5036 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5037 STATS_OFFSET32(stat_IfInMBUFDiscards),
5040 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5041 * skipped because of errata.
5043 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5044 8,0,8,8,8,8,8,8,8,8,
5045 4,0,4,4,4,4,4,4,4,4,
5046 4,4,4,4,4,4,4,4,4,4,
5047 4,4,4,4,4,4,4,4,4,4,
5051 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5052 8,0,8,8,8,8,8,8,8,8,
5053 4,4,4,4,4,4,4,4,4,4,
5054 4,4,4,4,4,4,4,4,4,4,
5055 4,4,4,4,4,4,4,4,4,4,
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
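/* ethtool self test: the first three tests are "offline" tests and can
 * only run with traffic stopped, so the chip is reset into diagnostic
 * mode for them and reset or reinitialized afterwards depending on
 * whether the interface is up.  Failures are reported both in buf[]
 * (indexed as in bnx2_tests_str_arr) and via ETH_TEST_FL_FAILED.
 */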
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		msleep_interruptible(3000);
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
			msleep_interruptible(4000);
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
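/* ethtool -p: identify the board by blinking its LED.  The EMAC LED is
 * driven manually via the override bits, toggling every 500 ms for
 * 2 * data iterations (roughly "data" seconds); the original LED mode
 * in BNX2_MISC_CFG is restored afterwards.
 */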
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
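/* Standard MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  PHY
 * register accesses are serialized with phy_lock, and writes require
 * CAP_NET_ADMIN.
 */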
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);
		return err;

	default:
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
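/* Netpoll hook: with the device IRQ disabled (e.g. netconsole), invoke
 * the interrupt handler directly so pending completions still get
 * serviced.
 */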
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev, NULL);
	enable_irq(bp->pdev->irq);
}
#endif
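/* One-time board setup called from the PCI probe path: enable the
 * device, map the register BAR, read the chip ID and bus mode, pull the
 * permanent MAC address out of shared memory, and set default ring,
 * coalescing, and link parameters.  Returns 0 or a negative errno with
 * all acquired resources released.
 */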
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address, "
		       "aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management capability, "
		       "aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
			       "failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->tx_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
		       "aborting.\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address. First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		printk(KERN_ERR PFX "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 100);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
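/* PCI probe entry point: allocate the net_device, run the one-time
 * board setup above, hook up the net_device methods and ethtool ops,
 * and register the interface.
 */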
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		printk(KERN_ERR PFX "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		dev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
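/* Power management: on suspend, quiesce the interface and pick a
 * firmware reset code that tells the chip whether Wake-on-LAN should
 * stay armed; on resume, restore D0 power state and reinitialize.
 */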
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
module_init(bnx2_init);
module_exit(bnx2_cleanup);