/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
61 #include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
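/* Note: the two helpers above tunnel GRC register accesses through the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config space; the final
 * write presumably parks the window at PCICFG_VENDOR_ID_OFFSET so that a
 * stray config-space access cannot hit an arbitrary GRC register.
 * For example, a read-modify-write through the window looks like:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, some_grc_addr);
 *	bnx2x_reg_wr_ind(bp, some_grc_addr, val | some_bit);
 *
 * (some_grc_addr/some_bit are placeholders, not driver symbols.)
 */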
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
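/* Note on the completion handshake above: the DMAE engine writes
 * DMAE_COMP_VAL into the wb_comp word in slowpath host memory when the
 * transfer finishes, so clearing wb_comp before posting the command and
 * then polling it is what serializes the caller against the hardware.
 */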
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
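/* Wide-bus ("wb") registers are 64 bits wide and need to be transferred as
 * a single two-dword unit, which appears to be why these helpers go through
 * DMAE (REG_WR_DMAE/REG_RD_DMAE) rather than issuing two independent 32-bit
 * GRC accesses.
 */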
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
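/* The loops above walk the MCP scratch area as a ring: 'mark' (read from
 * scratch offset 0xf104) is a pointer into the firmware log expressed in
 * MCP address space, and subtracting 0x08000000 converts it to an offset
 * within MCP_REG_MCPR_SCRATCH; dumping mark..0xF900 followed by
 * 0xF108..mark prints the log in chronological order. (This reading of the
 * constants is inferred from the code, not taken from vendor docs.)
 */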
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;	/* assumed start: dump the whole SGE ring */
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
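/* The IGU ack is a single 32-bit write: the status block id, storm id,
 * update flag and interrupt mode are packed into sb_id_and_flags with the
 * shifts above, next to the latest status block index. Packing everything
 * into one register write appears to be what lets the driver ack the
 * interrupt and change its enable state in one atomic operation.
 */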
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((fp->rx_comp_cons != rx_cons_sb) ||
	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		return 1;

	return 0;
}
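/* Note: the (rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT test above
 * handles the case where the status block consumer points at the last
 * descriptor of a CQ page, i.e. a "next page" element that carries no
 * completion; bumping the index skips over it before the comparison with
 * the software consumer.
 */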
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
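/* Worked example for the math above (figures illustrative): with a 4-page
 * TX ring, the NUM_TX_RINGS = 4 "next page" descriptors can never hold real
 * packets, so they are folded into 'used'; when prod == cons the function
 * reports tx_ring_size - NUM_TX_RINGS free descriptors rather than the raw
 * ring size. SUB_S16() keeps the prod/cons subtraction correct across
 * 16-bit wrap-around.
 */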
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
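/* In short: sge_mask is a bitmap of outstanding SGE pages. Completions
 * clear bits in arbitrary order, but the producer may only advance over
 * 64-bit mask elements that have gone completely to zero; each such element
 * is re-armed to all-ones and advances the producer by RX_SGE_MASK_ELEM_SZ
 * entries, so rx_sge_prod always trails the oldest still-outstanding page.
 */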
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
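/* The three RX producers are published to TSTORM internal memory as one
 * structure, written dword by dword; the firmware picks the snapshot up
 * from there. The mmiowb() in bnx2x_rx_int() keeps these posted writes
 * ordered with respect to other MMIO from the same CPU.
 */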
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad, queue;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */

static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
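/* GPIO register layout note: each pin has FLOAT/SET/CLR bit groups at fixed
 * positions, so "output low" = clear FLOAT + set CLR and "output high" =
 * clear FLOAT + set SET, while "input" simply floats the pin; the read
 * above masks everything except the FLOAT bits so that stale SET/CLR bits
 * from a previous call are not written back.
 */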
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_phy_hw_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_phy_hw_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_phy_hw_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
       In the latter case the fairness algorithm should be deactivated.
       If not all min_rates are zero then those that are zeroes will
       be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
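/* Worked example (values illustrative): with four vnics whose configured
 * min rates scale to 0/2500/5000/2500 after the *100 multiplication, the
 * zero entry is bumped to DEF_MIN_RATE, all_zero stays 0, and wsum becomes
 * the total that the per-vn fairness code later divides by; only when
 * every configured min rate is zero does the function return 0 and
 * fairness get disabled.
 */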
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer in-accuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
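/* Rate-shaping arithmetic, worked through (units assumed: port_rate in
 * Mbps, so r_param = port_rate/8 is bytes per usec): at 10000 Mbps,
 * r_param = 1250 and rs_threshold = 100usec * 1250 * 5/4 = 156250 bytes,
 * i.e. the byte credit of 1.25 timer periods - matching the "a little
 * bigger than the real time" comment above.
 */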
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      ((double)m_rs_vn.
				       protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2210 /* This function is called upon link interrupt */
2211 static void bnx2x_link_attn(struct bnx2x *bp)
2215 /* Make sure that we are synced with the current statistics */
2216 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2218 bnx2x_phy_hw_lock(bp);
2219 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2220 bnx2x_phy_hw_unlock(bp);
2222 if (bp->link_vars.link_up) {
2224 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2225 struct host_port_stats *pstats;
2227 pstats = bnx2x_sp(bp, port_stats);
2228 /* reset old bmac stats */
2229 memset(&(pstats->mac_stx[0]), 0,
2230 sizeof(struct mac_stx));
2232 if ((bp->state == BNX2X_STATE_OPEN) ||
2233 (bp->state == BNX2X_STATE_DISABLED))
2234 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2237 /* indicate link status */
2238 bnx2x_link_report(bp);
2243 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2244 if (vn == BP_E1HVN(bp))
continue;
2247 func = ((vn << 1) | BP_PORT(bp));
2249 /* Set the attention towards other drivers on the same port */
2251 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2252 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2256 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2257 struct cmng_struct_per_port m_cmng_port;
2259 int port = BP_PORT(bp);
2261 /* Init RATE SHAPING and FAIRNESS contexts */
2262 wsum = bnx2x_calc_vn_wsum(bp);
2263 bnx2x_init_port_minmax(bp, (int)wsum,
2264 bp->link_vars.line_speed,
&m_cmng_port);
if (IS_E1HMF(bp))
2267 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2268 bnx2x_init_vn_minmax(bp, 2*vn + port,
2269 wsum, bp->link_vars.line_speed,
&m_cmng_port);
}
}
2274 static void bnx2x__link_status_update(struct bnx2x *bp)
2276 if (bp->state != BNX2X_STATE_OPEN)
2279 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2281 if (bp->link_vars.link_up)
2282 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2284 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286 /* indicate link status */
2287 bnx2x_link_report(bp);
2290 static void bnx2x_pmf_update(struct bnx2x *bp)
2292 int port = BP_PORT(bp);
u32 val;

bp->port.pmf = 1;
2296 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2298 /* enable nig attention */
2299 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2303 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2311 * General service functions
2314 /* the slow path queue is odd since completions arrive on the fastpath ring */
2315 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316 u32 data_hi, u32 data_lo, int common)
2318 int func = BP_FUNC(bp);
2320 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2322 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2326 #ifdef BNX2X_STOP_ON_ERROR
2327 if (unlikely(bp->panic))
return -EIO;
#endif
2331 spin_lock_bh(&bp->spq_lock);
2333 if (!bp->spq_left) {
2334 BNX2X_ERR("BUG! SPQ ring full!\n");
2335 spin_unlock_bh(&bp->spq_lock);
return -EBUSY;
}
2340 /* CID needs port number to be encoded in it */
2341 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid)));
2344 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
if (common)
2346 bp->spq_prod_bd->hdr.type |=
2347 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2349 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");

} else {
bp->spq_prod_bd++;
bp->spq_prod_idx++;
}
2364 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2367 spin_unlock_bh(&bp->spq_lock);
return 0;
}
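/* Usage sketch (illustrative): a caller posts a ramrod and later picks
   up the completion on the fastpath ring, e.g. the statistics query
   issued by bnx2x_storm_stats_post() below:

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
		      data_hi, data_lo, 0);
*/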
2371 /* acquire split MCP access lock register */
2372 static int bnx2x_lock_alr(struct bnx2x *bp)
2379 for (j = 0; j < i*10; j++) {
2381 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383 if (val & (1L << 31))
2388 if (!(val & (1L << 31))) {
2389 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2396 /* Release split MCP access lock register */
2397 static void bnx2x_unlock_alr(struct bnx2x *bp)
u32 val = 0;

2401 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
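/* Usage sketch (illustrative): the ALR lock brackets accesses to the
   AEU registers shared with the MCP and the other port, as done by
   bnx2x_attn_int_deasserted() below:

	bnx2x_lock_alr(bp);
	... read MISC_REG_AEU_AFTER_INVERT_* and dispatch groups ...
	bnx2x_unlock_alr(bp);
*/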
2404 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2406 struct host_def_status_block *def_sb = bp->def_status_blk;
u16 rc = 0;

2409 barrier(); /* status block is written to by the chip */
2411 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2412 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
rc |= 1;
}
2415 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2416 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
rc |= 2;
}
2419 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2420 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
rc |= 4;
}
2423 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2424 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
rc |= 8;
}
2427 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2428 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
rc |= 16;
}
return rc;
2435 * slow path service functions
2438 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2440 int port = BP_PORT(bp);
2441 int func = BP_FUNC(bp);
2442 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2443 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2444 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2445 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2446 NIG_REG_MASK_INTERRUPT_PORT0;
2448 if (~bp->aeu_mask & (asserted & 0xff))
2449 BNX2X_ERR("IGU ERROR\n");
2450 if (bp->attn_state & asserted)
2451 BNX2X_ERR("IGU ERROR\n");
2453 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2454 bp->aeu_mask, asserted);
2455 bp->aeu_mask &= ~(asserted & 0xff);
2456 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2458 REG_WR(bp, aeu_addr, bp->aeu_mask);
2460 bp->attn_state |= asserted;
2462 if (asserted & ATTN_HARD_WIRED_MASK) {
2463 if (asserted & ATTN_NIG_FOR_FUNC) {
2465 /* save nig interrupt mask */
2466 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2467 REG_WR(bp, nig_int_mask_addr, 0);
2469 bnx2x_link_attn(bp);
2471 /* handle unicore attn? */
2473 if (asserted & ATTN_SW_TIMER_4_FUNC)
2474 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2476 if (asserted & GPIO_2_FUNC)
2477 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2479 if (asserted & GPIO_3_FUNC)
2480 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2482 if (asserted & GPIO_4_FUNC)
2483 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2486 if (asserted & ATTN_GENERAL_ATTN_1) {
2487 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2488 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2490 if (asserted & ATTN_GENERAL_ATTN_2) {
2491 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2494 if (asserted & ATTN_GENERAL_ATTN_3) {
2495 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2499 if (asserted & ATTN_GENERAL_ATTN_4) {
2500 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2501 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2503 if (asserted & ATTN_GENERAL_ATTN_5) {
2504 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2505 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2507 if (asserted & ATTN_GENERAL_ATTN_6) {
2508 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2513 } /* if hardwired */
2515 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2516 asserted, BAR_IGU_INTMEM + igu_addr);
2517 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2519 /* now set back the mask */
2520 if (asserted & ATTN_NIG_FOR_FUNC)
2521 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2524 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2526 int port = BP_PORT(bp);
2530 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2531 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2533 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2535 val = REG_RD(bp, reg_offset);
2536 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2537 REG_WR(bp, reg_offset, val);
2539 BNX2X_ERR("SPIO5 hw attention\n");
2541 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2542 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2543 /* Fan failure attention */
2545 /* The PHY reset is controlled by GPIO 1 */
2546 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2547 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2548 /* Low power mode is controlled by GPIO 2 */
2549 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2550 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2551 /* mark the failure */
2552 bp->link_params.ext_phy_config &=
2553 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2554 bp->link_params.ext_phy_config |=
2555 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
SHMEM_WR(bp,
2557 dev_info.port_hw_config[port].
2558 external_phy_config,
2559 bp->link_params.ext_phy_config);
2560 /* log the failure */
2561 printk(KERN_ERR PFX "Fan Failure on Network"
2562 " Controller %s has caused the driver to"
2563 " shutdown the card to prevent permanent"
2564 " damage. Please contact Dell Support for"
2565 " assistance\n", bp->dev->name);
2573 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2575 val = REG_RD(bp, reg_offset);
2576 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2577 REG_WR(bp, reg_offset, val);
2579 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2580 (attn & HW_INTERRUT_ASSERT_SET_0));
2585 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2589 if (attn & BNX2X_DOORQ_ASSERT) {
2591 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2592 BNX2X_ERR("DB hw attention 0x%x\n", val);
2593 /* DORQ discard attention */
if (val & 0x2)
2595 BNX2X_ERR("FATAL error from DORQ\n");
2598 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2600 int port = BP_PORT(bp);
2603 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2604 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2606 val = REG_RD(bp, reg_offset);
2607 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2608 REG_WR(bp, reg_offset, val);
2610 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2611 (attn & HW_INTERRUT_ASSERT_SET_1));
2616 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2620 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2622 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2623 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2624 /* CFC error attention */
if (val & 0x2)
2626 BNX2X_ERR("FATAL error from CFC\n");
2629 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2631 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2632 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2633 /* RQ_USDMDP_FIFO_OVERFLOW */
if (val & 0x18000)
2635 BNX2X_ERR("FATAL error from PXP\n");
2638 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2640 int port = BP_PORT(bp);
2643 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2644 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2646 val = REG_RD(bp, reg_offset);
2647 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2648 REG_WR(bp, reg_offset, val);
2650 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2651 (attn & HW_INTERRUT_ASSERT_SET_2));
2656 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2660 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2662 if (attn & BNX2X_PMF_LINK_ASSERT) {
2663 int func = BP_FUNC(bp);
2665 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2666 bnx2x__link_status_update(bp);
2667 if (SHMEM_RD(bp, func_mb[func].drv_status) &
DRV_STATUS_PMF)
2669 bnx2x_pmf_update(bp);
2671 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2673 BNX2X_ERR("MC assert!\n");
2674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2676 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2680 } else if (attn & BNX2X_MCP_ASSERT) {
2682 BNX2X_ERR("MCP assert!\n");
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2687 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2690 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2691 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2692 if (attn & BNX2X_GRC_TIMEOUT) {
2693 val = CHIP_IS_E1H(bp) ?
2694 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2695 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2697 if (attn & BNX2X_GRC_RSV) {
2698 val = CHIP_IS_E1H(bp) ?
2699 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2700 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2702 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2706 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2708 struct attn_route attn;
2709 struct attn_route group_mask;
2710 int port = BP_PORT(bp);
2715 /* need to take HW lock because MCP or other port might also
2716 try to handle this event */
bnx2x_lock_alr(bp);
2719 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2720 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2721 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2722 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2723 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2724 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2726 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2727 if (deasserted & (1 << index)) {
2728 group_mask = bp->attn_group[index];
2730 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2731 index, group_mask.sig[0], group_mask.sig[1],
2732 group_mask.sig[2], group_mask.sig[3]);
2734 bnx2x_attn_int_deasserted3(bp,
2735 attn.sig[3] & group_mask.sig[3]);
2736 bnx2x_attn_int_deasserted1(bp,
2737 attn.sig[1] & group_mask.sig[1]);
2738 bnx2x_attn_int_deasserted2(bp,
2739 attn.sig[2] & group_mask.sig[2]);
2740 bnx2x_attn_int_deasserted0(bp,
2741 attn.sig[0] & group_mask.sig[0]);
2743 if ((attn.sig[0] & group_mask.sig[0] &
2744 HW_PRTY_ASSERT_SET_0) ||
2745 (attn.sig[1] & group_mask.sig[1] &
2746 HW_PRTY_ASSERT_SET_1) ||
2747 (attn.sig[2] & group_mask.sig[2] &
2748 HW_PRTY_ASSERT_SET_2))
2749 BNX2X_ERR("FATAL HW block parity attention\n");
2753 bnx2x_unlock_alr(bp);
2755 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;

val = ~deasserted;
2758 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2759 val, BAR_IGU_INTMEM + reg_addr); */
2760 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2762 if (bp->aeu_mask & (deasserted & 0xff))
2763 BNX2X_ERR("IGU BUG!\n");
2764 if (~bp->attn_state & deasserted)
2765 BNX2X_ERR("IGU BUG!\n");
2767 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2768 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2770 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2771 bp->aeu_mask |= (deasserted & 0xff);
2773 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2774 REG_WR(bp, reg_addr, bp->aeu_mask);
2776 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2777 bp->attn_state &= ~deasserted;
2778 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2781 static void bnx2x_attn_int(struct bnx2x *bp)
2783 /* read local copy of bits */
2784 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2785 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2786 u32 attn_state = bp->attn_state;
2788 /* look for changed bits */
2789 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2790 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2793 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2794 attn_bits, attn_ack, asserted, deasserted);
2796 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2797 BNX2X_ERR("BAD attention state\n");
2799 /* handle bits that were raised */
if (asserted)
2801 bnx2x_attn_int_asserted(bp, asserted);

if (deasserted)
2804 bnx2x_attn_int_deasserted(bp, deasserted);
}
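/* Illustrative truth table for the edge detection above (one bit at a
   time): a bit is newly asserted only when the chip reports it set
   while it is neither acknowledged nor already tracked, and newly
   deasserted in the exactly opposite case:

	attn_bits  attn_ack  attn_state  ->  asserted  deasserted
	    1          0          0              1          0
	    0          1          1              0          1
	    1          1          1              0          0
*/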
2807 static void bnx2x_sp_task(struct work_struct *work)
2809 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2813 /* Return here if interrupt is disabled */
2814 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2815 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
return;
}
2819 status = bnx2x_update_dsb_idx(bp);
2820 /* if (status == 0) */
2821 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2823 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
/* HW attentions */
if (status & 0x1)
bnx2x_attn_int(bp);

2829 /* CStorm events: query_stats, port delete ramrod */
if (status & 0x2)
2831 bp->stats_pending = 0;
2833 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
IGU_INT_NOP, 1);
2835 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
IGU_INT_NOP, 1);
2837 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
IGU_INT_NOP, 1);
2839 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
IGU_INT_NOP, 1);
2841 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
IGU_INT_ENABLE, 1);
2846 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2848 struct net_device *dev = dev_instance;
2849 struct bnx2x *bp = netdev_priv(dev);
2851 /* Return here if interrupt is disabled */
2852 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2853 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
return IRQ_HANDLED;
}
2857 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2859 #ifdef BNX2X_STOP_ON_ERROR
2860 if (unlikely(bp->panic))
return IRQ_HANDLED;
#endif
2864 schedule_work(&bp->sp_task);

return IRQ_HANDLED;
}
2869 /* end of slow path */
2873 /****************************************************************************
2875 ****************************************************************************/
2877 /* sum[hi:lo] += add[hi:lo] */
2878 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
do { \
s_lo += a_lo; \
2881 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
} while (0)
2884 /* difference = minuend - subtrahend */
2885 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
do { \
2887 if (m_lo < s_lo) { \
/* underflow */ \
2889 d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
2891 /* we can 'loan' 1 */ \
d_hi--; \
2893 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
2895 /* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
} else { \
2900 /* m_lo >= s_lo */ \
2901 if (m_hi < s_hi) { \
d_hi = 0; \
d_lo = 0; \
} else { \
2905 /* m_hi >= s_hi */ \
2906 d_hi = m_hi - s_hi; \
2907 d_lo = m_lo - s_lo; \
} \
} \
} while (0)
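/* Self-contained example (illustrative only, not used by the driver):
   behaviour of the split 64-bit helpers at a carry boundary.  Note the
   extra parentheses in ADD_64 above; without them the '?:' operator
   would take the whole sum as its condition. */
static inline void bnx2x_example_add64(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);
	/* the low word wrapped to 0 and carried: s_hi == 1, s_lo == 0 */
}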
2912 #define UPDATE_STAT64(s, t) \
do { \
2914 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2915 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2916 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2917 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2918 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2919 pstats->mac_stx[1].t##_lo, diff.lo); \
} while (0)
2922 #define UPDATE_STAT64_NIG(s, t) \
do { \
2924 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2925 diff.lo, new->s##_lo, old->s##_lo); \
2926 ADD_64(estats->t##_hi, diff.hi, \
2927 estats->t##_lo, diff.lo); \
} while (0)
2930 /* sum[hi:lo] += add */
2931 #define ADD_EXTEND_64(s_hi, s_lo, a) \
do { \
s_lo += a; \
2934 s_hi += (s_lo < a) ? 1 : 0; \
} while (0)
2937 #define UPDATE_EXTEND_STAT(s) \
do { \
2939 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2940 pstats->mac_stx[1].s##_lo, \
new->s); \
} while (0)
2944 #define UPDATE_EXTEND_TSTAT(s, t) \
do { \
2946 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2947 old_tclient->s = le32_to_cpu(tclient->s); \
2948 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
} while (0)
2951 #define UPDATE_EXTEND_XSTAT(s, t) \
do { \
2953 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2954 old_xclient->s = le32_to_cpu(xclient->s); \
2955 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
} while (0)
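/* Minimal sketch of the "extend" pattern the UPDATE_EXTEND_* macros
   implement (hypothetical helper, not part of the driver): storm
   counters are 32 bits wide and wrap, so the last-seen value is
   remembered and only the delta is folded into a 64-bit hi:lo
   accumulator.  The unsigned subtraction stays correct across a
   single wrap of the hardware counter. */
static inline void bnx2x_example_extend(u32 *hi, u32 *lo, u32 *old,
					u32 cur)
{
	u32 diff = cur - *old;

	*old = cur;
	ADD_EXTEND_64(*hi, *lo, diff);
}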
2959 * General service functions
2962 static inline long bnx2x_hilo(u32 *hiref)
2964 u32 lo = *(hiref + 1);
2965 #if (BITS_PER_LONG == 64)
u32 hi = *hiref;

2968 return HILO_U64(hi, lo);
#else
return lo;
#endif
}
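/* Note (illustrative): the stats structures lay out each 64-bit
   counter as a hi dword immediately followed by its lo dword, so
   hiref + 1 is the low half.  On 32-bit kernels, where long cannot
   hold both halves, only the low 32 bits are returned. */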
2975 * Init service functions
2978 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2980 if (!bp->stats_pending) {
2981 struct eth_query_ramrod_data ramrod_data = {0};
int rc;
2984 ramrod_data.drv_counter = bp->stats_counter++;
2985 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2986 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2988 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2989 ((u32 *)&ramrod_data)[1],
2990 ((u32 *)&ramrod_data)[0], 0);
2992 /* stats ramrod has its own slot on the spq */
if (rc == 0)
2994 bp->stats_pending = 1;
2999 static void bnx2x_stats_init(struct bnx2x *bp)
3001 int port = BP_PORT(bp);
3003 bp->executer_idx = 0;
3004 bp->stats_counter = 0;
3008 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3010 bp->port.port_stx = 0;
3011 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3013 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3014 bp->port.old_nig_stats.brb_discard =
3015 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3016 bp->port.old_nig_stats.brb_truncate =
3017 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3018 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3019 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3020 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3021 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3023 /* function stats */
3024 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3025 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3026 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3027 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3029 bp->stats_state = STATS_STATE_DISABLED;
3030 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3031 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3034 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3036 struct dmae_command *dmae = &bp->stats_dmae;
3037 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3039 *stats_comp = DMAE_COMP_VAL;
3042 if (bp->executer_idx) {
3043 int loader_idx = PMF_DMAE_C(bp);
3045 memset(dmae, 0, sizeof(struct dmae_command));
3047 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3048 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3049 DMAE_CMD_DST_RESET |
3051 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3053 DMAE_CMD_ENDIANITY_DW_SWAP |
3055 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3057 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3058 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3059 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3060 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3061 sizeof(struct dmae_command) *
3062 (loader_idx + 1)) >> 2;
3063 dmae->dst_addr_hi = 0;
3064 dmae->len = sizeof(struct dmae_command) >> 2;
3067 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3068 dmae->comp_addr_hi = 0;
3072 bnx2x_post_dmae(bp, dmae, loader_idx);
3074 } else if (bp->func_stx) {
3076 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3080 static int bnx2x_stats_comp(struct bnx2x *bp)
3082 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
int cnt = 10;

might_sleep();
3086 while (*stats_comp != DMAE_COMP_VAL) {
if (!cnt) {
3089 BNX2X_ERR("timeout waiting for stats finished\n");
break;
}
cnt--;
msleep(1);
}
return 1;
}
3098 * Statistics service functions
3101 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3103 struct dmae_command *dmae;
3105 int loader_idx = PMF_DMAE_C(bp);
3106 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3109 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3110 BNX2X_ERR("BUG!\n");
3114 bp->executer_idx = 0;
3116 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3118 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3120 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3122 DMAE_CMD_ENDIANITY_DW_SWAP |
3124 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3125 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3127 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3128 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3129 dmae->src_addr_lo = bp->port.port_stx >> 2;
3130 dmae->src_addr_hi = 0;
3131 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3132 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3133 dmae->len = DMAE_LEN32_RD_MAX;
3134 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3135 dmae->comp_addr_hi = 0;
3138 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3139 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3140 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3141 dmae->src_addr_hi = 0;
3142 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3143 DMAE_LEN32_RD_MAX * 4);
3144 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3145 DMAE_LEN32_RD_MAX * 4);
3146 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3147 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3148 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3149 dmae->comp_val = DMAE_COMP_VAL;
3152 bnx2x_hw_stats_post(bp);
3153 bnx2x_stats_comp(bp);
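/* Note: the port statistics block is wider than one DMAE transfer
   (DMAE_LEN32_RD_MAX dwords), so the PMF update above is split in two
   chained commands: the first copies DMAE_LEN32_RD_MAX dwords and
   kicks the second through its GO register, and the second copies the
   remainder and writes DMAE_COMP_VAL to stats_comp so that
   bnx2x_stats_comp() can poll for completion. */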
3156 static void bnx2x_port_stats_init(struct bnx2x *bp)
3158 struct dmae_command *dmae;
3159 int port = BP_PORT(bp);
3160 int vn = BP_E1HVN(bp);
3162 int loader_idx = PMF_DMAE_C(bp);
3164 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3167 if (!bp->link_vars.link_up || !bp->port.pmf) {
3168 BNX2X_ERR("BUG!\n");
3172 bp->executer_idx = 0;
3175 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3176 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3177 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3179 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3181 DMAE_CMD_ENDIANITY_DW_SWAP |
3183 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3184 (vn << DMAE_CMD_E1HVN_SHIFT));
3186 if (bp->port.port_stx) {
3188 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3189 dmae->opcode = opcode;
3190 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3191 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3192 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3193 dmae->dst_addr_hi = 0;
3194 dmae->len = sizeof(struct host_port_stats) >> 2;
3195 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3196 dmae->comp_addr_hi = 0;
3202 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3203 dmae->opcode = opcode;
3204 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3205 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3206 dmae->dst_addr_lo = bp->func_stx >> 2;
3207 dmae->dst_addr_hi = 0;
3208 dmae->len = sizeof(struct host_func_stats) >> 2;
3209 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3210 dmae->comp_addr_hi = 0;
3215 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3216 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3217 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3219 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3221 DMAE_CMD_ENDIANITY_DW_SWAP |
3223 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3224 (vn << DMAE_CMD_E1HVN_SHIFT));
3226 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3228 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3229 NIG_REG_INGRESS_BMAC0_MEM);
3231 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3232 BIGMAC_REGISTER_TX_STAT_GTBYT */
3233 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3234 dmae->opcode = opcode;
3235 dmae->src_addr_lo = (mac_addr +
3236 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3237 dmae->src_addr_hi = 0;
3238 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3239 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3240 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3241 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3242 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3243 dmae->comp_addr_hi = 0;
3246 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3247 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3248 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249 dmae->opcode = opcode;
3250 dmae->src_addr_lo = (mac_addr +
3251 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3252 dmae->src_addr_hi = 0;
3253 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3254 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3255 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3256 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3257 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3258 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3259 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3260 dmae->comp_addr_hi = 0;
3263 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3265 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3267 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3268 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3269 dmae->opcode = opcode;
3270 dmae->src_addr_lo = (mac_addr +
3271 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3272 dmae->src_addr_hi = 0;
3273 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3274 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3275 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3276 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3277 dmae->comp_addr_hi = 0;
3280 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3281 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3282 dmae->opcode = opcode;
3283 dmae->src_addr_lo = (mac_addr +
3284 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3285 dmae->src_addr_hi = 0;
3286 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3287 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3289 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3291 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292 dmae->comp_addr_hi = 0;
3295 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3296 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297 dmae->opcode = opcode;
3298 dmae->src_addr_lo = (mac_addr +
3299 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3300 dmae->src_addr_hi = 0;
3301 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3302 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3303 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3304 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3305 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3306 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307 dmae->comp_addr_hi = 0;
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = opcode;
3314 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3315 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3316 dmae->src_addr_hi = 0;
3317 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3318 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3319 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3320 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3321 dmae->comp_addr_hi = 0;
3324 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3325 dmae->opcode = opcode;
3326 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3327 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3328 dmae->src_addr_hi = 0;
3329 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3330 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3331 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3332 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3333 dmae->len = (2*sizeof(u32)) >> 2;
3334 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3335 dmae->comp_addr_hi = 0;
3338 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3339 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3340 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3341 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3343 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3345 DMAE_CMD_ENDIANITY_DW_SWAP |
3347 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3348 (vn << DMAE_CMD_E1HVN_SHIFT));
3349 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3350 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3351 dmae->src_addr_hi = 0;
3352 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3353 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3354 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3355 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3356 dmae->len = (2*sizeof(u32)) >> 2;
3357 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3358 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3359 dmae->comp_val = DMAE_COMP_VAL;
3364 static void bnx2x_func_stats_init(struct bnx2x *bp)
3366 struct dmae_command *dmae = &bp->stats_dmae;
3367 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3370 if (!bp->func_stx) {
3371 BNX2X_ERR("BUG!\n");
3375 bp->executer_idx = 0;
3376 memset(dmae, 0, sizeof(struct dmae_command));
3378 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3379 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3380 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3382 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3384 DMAE_CMD_ENDIANITY_DW_SWAP |
3386 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3387 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3388 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3389 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3390 dmae->dst_addr_lo = bp->func_stx >> 2;
3391 dmae->dst_addr_hi = 0;
3392 dmae->len = sizeof(struct host_func_stats) >> 2;
3393 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3394 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3395 dmae->comp_val = DMAE_COMP_VAL;
3400 static void bnx2x_stats_start(struct bnx2x *bp)
if (bp->port.pmf)
3403 bnx2x_port_stats_init(bp);
3405 else if (bp->func_stx)
3406 bnx2x_func_stats_init(bp);
3408 bnx2x_hw_stats_post(bp);
3409 bnx2x_storm_stats_post(bp);
3412 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3414 bnx2x_stats_comp(bp);
3415 bnx2x_stats_pmf_update(bp);
3416 bnx2x_stats_start(bp);
3419 static void bnx2x_stats_restart(struct bnx2x *bp)
3421 bnx2x_stats_comp(bp);
3422 bnx2x_stats_start(bp);
3425 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3427 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3428 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3429 struct regpair diff;
3431 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3432 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3433 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3434 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3435 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3436 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3437 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3438 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3439 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3440 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3441 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3442 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3443 UPDATE_STAT64(tx_stat_gt127,
3444 tx_stat_etherstatspkts65octetsto127octets);
3445 UPDATE_STAT64(tx_stat_gt255,
3446 tx_stat_etherstatspkts128octetsto255octets);
3447 UPDATE_STAT64(tx_stat_gt511,
3448 tx_stat_etherstatspkts256octetsto511octets);
3449 UPDATE_STAT64(tx_stat_gt1023,
3450 tx_stat_etherstatspkts512octetsto1023octets);
3451 UPDATE_STAT64(tx_stat_gt1518,
3452 tx_stat_etherstatspkts1024octetsto1522octets);
3453 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3454 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3455 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3456 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3457 UPDATE_STAT64(tx_stat_gterr,
3458 tx_stat_dot3statsinternalmactransmiterrors);
3459 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3462 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3464 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3465 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3467 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3468 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3469 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3470 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3471 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3472 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3473 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3474 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3475 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3476 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3477 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3478 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3479 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3480 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3481 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3482 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3483 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3484 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3485 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3486 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3487 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3488 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3489 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3490 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3491 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3492 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3493 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3494 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3495 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3496 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3497 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3500 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3502 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3503 struct nig_stats *old = &(bp->port.old_nig_stats);
3504 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3505 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3506 struct regpair diff;
3508 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3509 bnx2x_bmac_stats_update(bp);
3511 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3512 bnx2x_emac_stats_update(bp);
3514 else { /* unreached */
3515 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3519 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3520 new->brb_discard - old->brb_discard);
3521 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3522 new->brb_truncate - old->brb_truncate);
3524 UPDATE_STAT64_NIG(egress_mac_pkt0,
3525 etherstatspkts1024octetsto1522octets);
3526 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3528 memcpy(old, new, sizeof(struct nig_stats));
3530 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3531 sizeof(struct mac_stx));
3532 estats->brb_drop_hi = pstats->brb_drop_hi;
3533 estats->brb_drop_lo = pstats->brb_drop_lo;
3535 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
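/* The start/end pair appears to act as a consistency marker for the
   snapshot: the end counter is bumped and start is set to match, so a
   reader that sees host_port_stats_start == host_port_stats_end knows
   it is not looking at a half-written block. */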
3540 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3542 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3543 int cl_id = BP_CL_ID(bp);
3544 struct tstorm_per_port_stats *tport =
3545 &stats->tstorm_common.port_statistics;
3546 struct tstorm_per_client_stats *tclient =
3547 &stats->tstorm_common.client_statistics[cl_id];
3548 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3549 struct xstorm_per_client_stats *xclient =
3550 &stats->xstorm_common.client_statistics[cl_id];
3551 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3552 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3553 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3556 /* are storm stats valid? */
3557 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3558 bp->stats_counter) {
3559 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3560 " tstorm counter (%d) != stats_counter (%d)\n",
3561 tclient->stats_counter, bp->stats_counter);
3564 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3565 bp->stats_counter) {
3566 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3567 " xstorm counter (%d) != stats_counter (%d)\n",
3568 xclient->stats_counter, bp->stats_counter);
3572 fstats->total_bytes_received_hi =
3573 fstats->valid_bytes_received_hi =
3574 le32_to_cpu(tclient->total_rcv_bytes.hi);
3575 fstats->total_bytes_received_lo =
3576 fstats->valid_bytes_received_lo =
3577 le32_to_cpu(tclient->total_rcv_bytes.lo);
3579 estats->error_bytes_received_hi =
3580 le32_to_cpu(tclient->rcv_error_bytes.hi);
3581 estats->error_bytes_received_lo =
3582 le32_to_cpu(tclient->rcv_error_bytes.lo);
3583 ADD_64(estats->error_bytes_received_hi,
3584 estats->rx_stat_ifhcinbadoctets_hi,
3585 estats->error_bytes_received_lo,
3586 estats->rx_stat_ifhcinbadoctets_lo);
3588 ADD_64(fstats->total_bytes_received_hi,
3589 estats->error_bytes_received_hi,
3590 fstats->total_bytes_received_lo,
3591 estats->error_bytes_received_lo);
3593 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3594 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3595 total_multicast_packets_received);
3596 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3597 total_broadcast_packets_received);
3599 fstats->total_bytes_transmitted_hi =
3600 le32_to_cpu(xclient->total_sent_bytes.hi);
3601 fstats->total_bytes_transmitted_lo =
3602 le32_to_cpu(xclient->total_sent_bytes.lo);
3604 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3605 total_unicast_packets_transmitted);
3606 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3607 total_multicast_packets_transmitted);
3608 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3609 total_broadcast_packets_transmitted);
3611 memcpy(estats, &(fstats->total_bytes_received_hi),
3612 sizeof(struct host_func_stats) - 2*sizeof(u32));
3614 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3615 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3616 estats->brb_truncate_discard =
3617 le32_to_cpu(tport->brb_truncate_discard);
3618 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3620 old_tclient->rcv_unicast_bytes.hi =
3621 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3622 old_tclient->rcv_unicast_bytes.lo =
3623 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3624 old_tclient->rcv_broadcast_bytes.hi =
3625 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3626 old_tclient->rcv_broadcast_bytes.lo =
3627 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3628 old_tclient->rcv_multicast_bytes.hi =
3629 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3630 old_tclient->rcv_multicast_bytes.lo =
3631 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3632 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3634 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3635 old_tclient->packets_too_big_discard =
3636 le32_to_cpu(tclient->packets_too_big_discard);
3637 estats->no_buff_discard =
3638 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3639 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3641 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3642 old_xclient->unicast_bytes_sent.hi =
3643 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3644 old_xclient->unicast_bytes_sent.lo =
3645 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3646 old_xclient->multicast_bytes_sent.hi =
3647 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3648 old_xclient->multicast_bytes_sent.lo =
3649 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3650 old_xclient->broadcast_bytes_sent.hi =
3651 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3652 old_xclient->broadcast_bytes_sent.lo =
3653 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3655 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3660 static void bnx2x_net_stats_update(struct bnx2x *bp)
3662 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3663 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3664 struct net_device_stats *nstats = &bp->dev->stats;
3666 nstats->rx_packets =
3667 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3668 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3669 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3671 nstats->tx_packets =
3672 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3673 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3674 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3676 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3678 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3680 nstats->rx_dropped = old_tclient->checksum_discard +
3681 estats->mac_discard;
3682 nstats->tx_dropped = 0;
3685 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3687 nstats->collisions =
3688 estats->tx_stat_dot3statssinglecollisionframes_lo +
3689 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3690 estats->tx_stat_dot3statslatecollisions_lo +
3691 estats->tx_stat_dot3statsexcessivecollisions_lo;
3693 estats->jabber_packets_received =
3694 old_tclient->packets_too_big_discard +
3695 estats->rx_stat_dot3statsframestoolong_lo;
3697 nstats->rx_length_errors =
3698 estats->rx_stat_etherstatsundersizepkts_lo +
3699 estats->jabber_packets_received;
3700 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3701 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3702 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3703 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3704 nstats->rx_missed_errors = estats->xxoverflow_discard;
3706 nstats->rx_errors = nstats->rx_length_errors +
3707 nstats->rx_over_errors +
3708 nstats->rx_crc_errors +
3709 nstats->rx_frame_errors +
3710 nstats->rx_fifo_errors +
3711 nstats->rx_missed_errors;
3713 nstats->tx_aborted_errors =
3714 estats->tx_stat_dot3statslatecollisions_lo +
3715 estats->tx_stat_dot3statsexcessivecollisions_lo;
3716 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3717 nstats->tx_fifo_errors = 0;
3718 nstats->tx_heartbeat_errors = 0;
3719 nstats->tx_window_errors = 0;
3721 nstats->tx_errors = nstats->tx_aborted_errors +
3722 nstats->tx_carrier_errors;
3725 static void bnx2x_stats_update(struct bnx2x *bp)
3727 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3730 if (*stats_comp != DMAE_COMP_VAL)
return;

if (bp->port.pmf)
3734 update = (bnx2x_hw_stats_update(bp) == 0);

3736 update |= (bnx2x_storm_stats_update(bp) == 0);

if (update)
3739 bnx2x_net_stats_update(bp);

else {
3742 if (bp->stats_pending) {
3743 bp->stats_pending++;
3744 if (bp->stats_pending == 3) {
3745 BNX2X_ERR("stats not updated 3 times in a row\n");
3752 if (bp->msglevel & NETIF_MSG_TIMER) {
3753 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3754 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3755 struct net_device_stats *nstats = &bp->dev->stats;
3758 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3759 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3761 bnx2x_tx_avail(bp->fp),
3762 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3763 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3765 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3766 bp->fp->rx_comp_cons),
3767 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3768 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3769 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3770 estats->driver_xoff, estats->brb_drop_lo);
3771 printk(KERN_DEBUG "tstats: checksum_discard %u "
3772 "packets_too_big_discard %u no_buff_discard %u "
3773 "mac_discard %u mac_filter_discard %u "
3774 "xxovrflow_discard %u brb_truncate_discard %u "
3775 "ttl0_discard %u\n",
3776 old_tclient->checksum_discard,
3777 old_tclient->packets_too_big_discard,
3778 old_tclient->no_buff_discard, estats->mac_discard,
3779 estats->mac_filter_discard, estats->xxoverflow_discard,
3780 estats->brb_truncate_discard,
3781 old_tclient->ttl0_discard);
3783 for_each_queue(bp, i) {
3784 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3785 bnx2x_fp(bp, i, tx_pkt),
3786 bnx2x_fp(bp, i, rx_pkt),
3787 bnx2x_fp(bp, i, rx_calls));
3791 bnx2x_hw_stats_post(bp);
3792 bnx2x_storm_stats_post(bp);
3795 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3797 struct dmae_command *dmae;
3799 int loader_idx = PMF_DMAE_C(bp);
3800 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3802 bp->executer_idx = 0;
3804 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3806 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3808 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3810 DMAE_CMD_ENDIANITY_DW_SWAP |
3812 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3813 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3815 if (bp->port.port_stx) {
3817 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3819 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3821 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3822 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3823 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3824 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3825 dmae->dst_addr_hi = 0;
3826 dmae->len = sizeof(struct host_port_stats) >> 2;
3828 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3829 dmae->comp_addr_hi = 0;
3832 dmae->comp_addr_lo =
3833 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3834 dmae->comp_addr_hi =
3835 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3836 dmae->comp_val = DMAE_COMP_VAL;
3844 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3845 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3846 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3847 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3848 dmae->dst_addr_lo = bp->func_stx >> 2;
3849 dmae->dst_addr_hi = 0;
3850 dmae->len = sizeof(struct host_func_stats) >> 2;
3851 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3852 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3853 dmae->comp_val = DMAE_COMP_VAL;
3859 static void bnx2x_stats_stop(struct bnx2x *bp)
3863 bnx2x_stats_comp(bp);
if (bp->port.pmf)
3866 update = (bnx2x_hw_stats_update(bp) == 0);

3868 update |= (bnx2x_storm_stats_update(bp) == 0);

if (update) {
3871 bnx2x_net_stats_update(bp);

if (bp->port.pmf)
3874 bnx2x_port_stats_stop(bp);
3876 bnx2x_hw_stats_post(bp);
3877 bnx2x_stats_comp(bp);
3881 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3885 static const struct {
3886 void (*action)(struct bnx2x *bp);
3887 enum bnx2x_stats_state next_state;
3888 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3891 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3892 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3893 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3894 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3897 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3898 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3899 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3900 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3904 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3906 enum bnx2x_stats_state state = bp->stats_state;
3908 bnx2x_stats_stm[state][event].action(bp);
3909 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3911 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3912 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3913 state, event, bp->stats_state);
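/* Example walk through the table above: while ENABLED, a STOP event
   runs bnx2x_stats_stop() and drops the machine back to DISABLED; a
   later LINK_UP event then re-arms collection through
   bnx2x_stats_start().  UPDATE events are ignored while DISABLED. */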
3916 static void bnx2x_timer(unsigned long data)
3918 struct bnx2x *bp = (struct bnx2x *) data;
3920 if (!netif_running(bp->dev))
3923 if (atomic_read(&bp->intr_sem) != 0)
3927 struct bnx2x_fastpath *fp = &bp->fp[0];
3930 bnx2x_tx_int(fp, 1000);
3931 rc = bnx2x_rx_int(fp, 1000);
3934 if (!BP_NOMCP(bp)) {
3935 int func = BP_FUNC(bp);
3939 ++bp->fw_drv_pulse_wr_seq;
3940 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3941 /* TBD - add SYSTEM_TIME */
3942 drv_pulse = bp->fw_drv_pulse_wr_seq;
3943 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3945 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3946 MCP_PULSE_SEQ_MASK);
3947 /* The delta between driver pulse and mcp response
3948 * should be 1 (before mcp response) or 0 (after mcp response)
3950 if ((drv_pulse != mcp_pulse) &&
3951 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3952 /* someone lost a heartbeat... */
3953 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3954 drv_pulse, mcp_pulse);
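/* Worked example (hypothetical sequence numbers): after the driver
   writes drv_pulse = 0x0005, a healthy MCP either still shows
   mcp_pulse = 0x0004 (delta 1, response pending) or has already
   echoed 0x0005 (delta 0).  Reading, say, 0x0002 fails both tests
   above and flags a lost heartbeat. */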
3958 if ((bp->state == BNX2X_STATE_OPEN) ||
3959 (bp->state == BNX2X_STATE_DISABLED))
3960 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3963 mod_timer(&bp->timer, jiffies + bp->current_interval);
3966 /* end of Statistics */
3971 * nic init service functions
3974 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3976 int port = BP_PORT(bp);
3978 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3979 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3980 sizeof(struct ustorm_def_status_block)/4);
3981 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3982 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3983 sizeof(struct cstorm_def_status_block)/4);
3986 static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
3987 struct host_status_block *sb, dma_addr_t mapping)
3989 int port = BP_PORT(bp);
3990 int func = BP_FUNC(bp);
3995 section = ((u64)mapping) + offsetof(struct host_status_block,
3997 sb->u_status_block.status_block_id = sb_id;
3999 REG_WR(bp, BAR_USTRORM_INTMEM +
4000 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4001 REG_WR(bp, BAR_USTRORM_INTMEM +
4002 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4004 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4005 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4007 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4008 REG_WR16(bp, BAR_USTRORM_INTMEM +
4009 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4012 section = ((u64)mapping) + offsetof(struct host_status_block,
4014 sb->c_status_block.status_block_id = sb_id;
4016 REG_WR(bp, BAR_CSTRORM_INTMEM +
4017 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4018 REG_WR(bp, BAR_CSTRORM_INTMEM +
4019 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4021 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4022 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4024 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4025 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4026 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4028 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4031 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4033 int func = BP_FUNC(bp);
4035 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4036 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4037 sizeof(struct ustorm_def_status_block)/4);
4038 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4039 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4040 sizeof(struct cstorm_def_status_block)/4);
4041 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4042 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4043 sizeof(struct xstorm_def_status_block)/4);
4044 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4045 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4046 sizeof(struct tstorm_def_status_block)/4);
4049 static void bnx2x_init_def_sb(struct bnx2x *bp,
4050 struct host_def_status_block *def_sb,
4051 dma_addr_t mapping, int sb_id)
4053 int port = BP_PORT(bp);
4054 int func = BP_FUNC(bp);
4055 int index, val, reg_offset;
4059 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4060 atten_status_block);
4061 def_sb->atten_status_block.status_block_id = sb_id;
4063 bp->def_att_idx = 0;
4066 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4067 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4069 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4070 bp->attn_group[index].sig[0] = REG_RD(bp,
4071 reg_offset + 0x10*index);
4072 bp->attn_group[index].sig[1] = REG_RD(bp,
4073 reg_offset + 0x4 + 0x10*index);
4074 bp->attn_group[index].sig[2] = REG_RD(bp,
4075 reg_offset + 0x8 + 0x10*index);
4076 bp->attn_group[index].sig[3] = REG_RD(bp,
4077 reg_offset + 0xc + 0x10*index);
4080 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4081 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4083 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4084 HC_REG_ATTN_MSG0_ADDR_L);
4086 REG_WR(bp, reg_offset, U64_LO(section));
4087 REG_WR(bp, reg_offset + 4, U64_HI(section));
4089 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4091 val = REG_RD(bp, reg_offset);
4093 REG_WR(bp, reg_offset, val);
4096 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4097 u_def_status_block);
4098 def_sb->u_def_status_block.status_block_id = sb_id;
4102 REG_WR(bp, BAR_USTRORM_INTMEM +
4103 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4104 REG_WR(bp, BAR_USTRORM_INTMEM +
4105 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4107 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4108 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4109 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4112 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4113 REG_WR16(bp, BAR_USTRORM_INTMEM +
4114 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4117 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4118 c_def_status_block);
4119 def_sb->c_def_status_block.status_block_id = sb_id;
4123 REG_WR(bp, BAR_CSTRORM_INTMEM +
4124 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4125 REG_WR(bp, BAR_CSTRORM_INTMEM +
4126 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4128 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4129 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4130 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4133 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4134 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4135 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4138 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4139 t_def_status_block);
4140 def_sb->t_def_status_block.status_block_id = sb_id;
4144 REG_WR(bp, BAR_TSTRORM_INTMEM +
4145 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4147 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4149 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4150 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4151 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4156 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4159 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4160 x_def_status_block);
4161 def_sb->x_def_status_block.status_block_id = sb_id;
4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4166 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4167 REG_WR(bp, BAR_XSTRORM_INTMEM +
4168 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4170 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4171 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4172 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4175 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4176 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4177 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4179 bp->stats_pending = 0;
4180 bp->set_mac_pending = 0;
4182 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4183 }
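/* Note on the coalescing setup below: the HC timeout written per status
 * block index is presumably in 12us units (hence the rx_ticks/12 and
 * tx_ticks/12 scaling), and a tick value of 0 disables coalescing for
 * that index via the HC_DISABLE flag.
 */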
4185 static void bnx2x_update_coalesce(struct bnx2x *bp)
4186 {
4187 int port = BP_PORT(bp);
4188 int i;
4190 for_each_queue(bp, i) {
4191 int sb_id = bp->fp[i].sb_id;
4193 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4194 REG_WR8(bp, BAR_USTRORM_INTMEM +
4195 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4196 HC_INDEX_U_ETH_RX_CQ_CONS),
4197 bp->rx_ticks/12);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 HC_INDEX_U_ETH_RX_CQ_CONS),
4201 bp->rx_ticks ? 0 : 1);
4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4206 HC_INDEX_C_ETH_TX_CQ_CONS),
4207 bp->tx_ticks/12);
4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210 HC_INDEX_C_ETH_TX_CQ_CONS),
4211 bp->tx_ticks ? 0 : 1);
4212 }
4213 }
4215 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4216 struct bnx2x_fastpath *fp, int last)
4217 {
4218 int i;
4220 for (i = 0; i < last; i++) {
4221 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4222 struct sk_buff *skb = rx_buf->skb;
4224 if (skb == NULL) {
4225 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4226 continue;
4227 }
4229 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230 pci_unmap_single(bp->pdev,
4231 pci_unmap_addr(rx_buf, mapping),
4232 bp->rx_buf_use_size,
4233 PCI_DMA_FROMDEVICE);
4235 dev_kfree_skb(skb);
4236 rx_buf->skb = NULL;
4237 }
4238 }
4240 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4241 {
4242 int func = BP_FUNC(bp);
4243 u16 ring_prod, cqe_ring_prod = 0;
4244 int i, j;
4246 bp->rx_buf_use_size = bp->dev->mtu;
4247 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4248 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4250 if (bp->flags & TPA_ENABLE_FLAG) {
4252 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4253 bp->rx_buf_use_size, bp->rx_buf_size,
4254 bp->dev->mtu + ETH_OVREHEAD);
4256 for_each_queue(bp, j) {
4257 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4258 struct bnx2x_fastpath *fp = &bp->fp[j];
4260 fp->tpa_pool[i].skb =
4261 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4262 if (!fp->tpa_pool[i].skb) {
4263 BNX2X_ERR("Failed to allocate TPA "
4264 "skb pool for queue[%d] - "
4265 "disabling TPA on this "
4267 bnx2x_free_tpa_pool(bp, fp, i);
4268 fp->disable_tpa = 1;
4271 pci_unmap_addr_set((struct sw_rx_bd *)
4272 &bp->fp->tpa_pool[i],
4274 fp->tpa_state[i] = BNX2X_TPA_STOP;
4279 for_each_queue(bp, j) {
4280 struct bnx2x_fastpath *fp = &bp->fp[j];
4283 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4284 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4286 /* "next page" elements initialization */
4288 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4289 struct eth_rx_sge *sge;
4291 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4292 sge->addr_hi =
4293 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4294 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4295 sge->addr_lo =
4296 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4297 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4298 }
4300 bnx2x_init_sge_ring_bit_mask(fp);
4303 for (i = 1; i <= NUM_RX_RINGS; i++) {
4304 struct eth_rx_bd *rx_bd;
4306 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4307 rx_bd->addr_hi =
4308 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4309 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4310 rx_bd->addr_lo =
4311 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4312 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4313 }
4316 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4317 struct eth_rx_cqe_next_page *nextpg;
4319 nextpg = (struct eth_rx_cqe_next_page *)
4320 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4321 nextpg->addr_hi =
4322 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4323 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4324 nextpg->addr_lo =
4325 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4326 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4327 }
4329 /* Allocate SGEs and initialize the ring elements */
4330 for (i = 0, ring_prod = 0;
4331 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4333 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4334 BNX2X_ERR("was only able to allocate "
4336 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4337 /* Cleanup already allocated elements */
4338 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4339 bnx2x_free_tpa_pool(bp, fp,
4340 ETH_MAX_AGGREGATION_QUEUES_E1H);
4341 fp->disable_tpa = 1;
4342 ring_prod = 0;
4343 break;
4344 }
4345 ring_prod = NEXT_SGE_IDX(ring_prod);
4346 }
4347 fp->rx_sge_prod = ring_prod;
4349 /* Allocate BDs and initialize BD ring */
4350 fp->rx_comp_cons = 0;
4351 cqe_ring_prod = ring_prod = 0;
4352 for (i = 0; i < bp->rx_ring_size; i++) {
4353 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4354 BNX2X_ERR("was only able to allocate "
4356 bp->eth_stats.rx_skb_alloc_failed++;
4359 ring_prod = NEXT_RX_IDX(ring_prod);
4360 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4361 WARN_ON(ring_prod <= i);
4362 }
4364 fp->rx_bd_prod = ring_prod;
4365 /* must not have more available CQEs than BDs */
4366 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4367 cqe_ring_prod);
4368 fp->rx_pkt = fp->rx_calls = 0;
4370 /* Warning!
4371 * this will generate an interrupt (to the TSTORM)
4372 * must only be done after chip is initialized
4373 */
4374 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4375 fp->rx_sge_prod);
4377 if (j != 0)
4378 continue;
4379 REG_WR(bp, BAR_USTRORM_INTMEM +
4380 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4381 U64_LO(fp->rx_comp_mapping));
4382 REG_WR(bp, BAR_USTRORM_INTMEM +
4383 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4384 U64_HI(fp->rx_comp_mapping));
4385 }
4386 }
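/* As with the Rx rings above, each Tx BD page donates its last
 * descriptor (TX_DESC_CNT * i - 1) as a "next page" pointer, chaining
 * the NUM_TX_RINGS pages into one logical ring.
 */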
4388 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4389 {
4390 int i, j;
4392 for_each_queue(bp, j) {
4393 struct bnx2x_fastpath *fp = &bp->fp[j];
4395 for (i = 1; i <= NUM_TX_RINGS; i++) {
4396 struct eth_tx_bd *tx_bd =
4397 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4399 tx_bd->addr_hi =
4400 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4401 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4402 tx_bd->addr_lo =
4403 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4404 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4405 }
4407 fp->tx_pkt_prod = 0;
4408 fp->tx_pkt_cons = 0;
4409 fp->tx_bd_prod = 0;
4410 fp->tx_bd_cons = 0;
4411 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4412 fp->tx_pkt = 0;
4413 }
4414 }
4416 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4417 {
4418 int func = BP_FUNC(bp);
4420 spin_lock_init(&bp->spq_lock);
4422 bp->spq_left = MAX_SPQ_PENDING;
4423 bp->spq_prod_idx = 0;
4424 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4425 bp->spq_prod_bd = bp->spq;
4426 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4428 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4429 U64_LO(bp->spq_mapping));
4430 REG_WR(bp,
4431 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4432 U64_HI(bp->spq_mapping));
4434 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4435 bp->spq_prod_idx);
4436 }
4438 static void bnx2x_init_context(struct bnx2x *bp)
4439 {
4440 int i;
4442 for_each_queue(bp, i) {
4443 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4444 struct bnx2x_fastpath *fp = &bp->fp[i];
4445 u8 sb_id = FP_SB_ID(fp);
4447 context->xstorm_st_context.tx_bd_page_base_hi =
4448 U64_HI(fp->tx_desc_mapping);
4449 context->xstorm_st_context.tx_bd_page_base_lo =
4450 U64_LO(fp->tx_desc_mapping);
4451 context->xstorm_st_context.db_data_addr_hi =
4452 U64_HI(fp->tx_prods_mapping);
4453 context->xstorm_st_context.db_data_addr_lo =
4454 U64_LO(fp->tx_prods_mapping);
4455 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4456 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4458 context->ustorm_st_context.common.sb_index_numbers =
4459 BNX2X_RX_SB_INDEX_NUM;
4460 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4461 context->ustorm_st_context.common.status_block_id = sb_id;
4462 context->ustorm_st_context.common.flags =
4463 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4464 context->ustorm_st_context.common.mc_alignment_size = 64;
4465 context->ustorm_st_context.common.bd_buff_size =
4466 bp->rx_buf_use_size;
4467 context->ustorm_st_context.common.bd_page_base_hi =
4468 U64_HI(fp->rx_desc_mapping);
4469 context->ustorm_st_context.common.bd_page_base_lo =
4470 U64_LO(fp->rx_desc_mapping);
4471 if (!fp->disable_tpa) {
4472 context->ustorm_st_context.common.flags |=
4473 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4474 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4475 context->ustorm_st_context.common.sge_buff_size =
4476 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4477 context->ustorm_st_context.common.sge_page_base_hi =
4478 U64_HI(fp->rx_sge_mapping);
4479 context->ustorm_st_context.common.sge_page_base_lo =
4480 U64_LO(fp->rx_sge_mapping);
4481 }
4483 context->cstorm_st_context.sb_index_number =
4484 HC_INDEX_C_ETH_TX_CQ_CONS;
4485 context->cstorm_st_context.status_block_id = sb_id;
4487 context->xstorm_ag_context.cdu_reserved =
4488 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4489 CDU_REGION_NUMBER_XCM_AG,
4490 ETH_CONNECTION_TYPE);
4491 context->ustorm_ag_context.cdu_usage =
4492 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4493 CDU_REGION_NUMBER_UCM_AG,
4494 ETH_CONNECTION_TYPE);
4495 }
4496 }
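/* The indirection table below spreads RSS hash results over the active
 * queues round-robin: entry i selects queue (i % num_queues), e.g. with
 * 4 queues the table reads 0,1,2,3,0,1,2,3,...
 */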
4498 static void bnx2x_init_ind_table(struct bnx2x *bp)
4499 {
4500 int port = BP_PORT(bp);
4501 int i;
4503 if (!is_multi(bp))
4504 return;
4506 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4507 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4508 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4509 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4510 i % bp->num_queues);
4512 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4513 }
4515 static void bnx2x_set_client_config(struct bnx2x *bp)
4516 {
4517 struct tstorm_eth_client_config tstorm_client = {0};
4518 int port = BP_PORT(bp);
4519 int i;
4521 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4522 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4523 tstorm_client.config_flags =
4524 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4525 #ifdef BCM_VLAN
4526 if (bp->rx_mode && bp->vlgrp) {
4527 tstorm_client.config_flags |=
4528 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4529 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4530 }
4531 #endif
4533 if (bp->flags & TPA_ENABLE_FLAG) {
4534 tstorm_client.max_sges_for_packet =
4535 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4536 tstorm_client.max_sges_for_packet =
4537 ((tstorm_client.max_sges_for_packet +
4538 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4539 PAGES_PER_SGE_SHIFT;
4541 tstorm_client.config_flags |=
4542 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4543 }
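/* Worked example for max_sges_for_packet above, assuming a 4K
 * BCM_PAGE_SIZE and PAGES_PER_SGE == 2: an mtu of 9000 page-aligns to
 * 12288, i.e. 3 pages; rounding up to a PAGES_PER_SGE multiple gives
 * 4 pages, which is 4 >> PAGES_PER_SGE_SHIFT = 2 SGEs per packet.
 */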
4545 for_each_queue(bp, i) {
4546 REG_WR(bp, BAR_TSTRORM_INTMEM +
4547 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4548 ((u32 *)&tstorm_client)[0]);
4549 REG_WR(bp, BAR_TSTRORM_INTMEM +
4550 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4551 ((u32 *)&tstorm_client)[1]);
4552 }
4554 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4555 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4556 }
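/* Each function owns one bit (1 << BP_L_ID(bp)) in the TSTORM MAC
 * filter masks; bnx2x_set_storm_rx_mode() below sets the drop-all or
 * accept-all bits for unicast/multicast/broadcast according to the
 * requested rx mode and writes the whole structure to TSTORM memory.
 */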
4558 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4559 {
4560 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4561 int mode = bp->rx_mode;
4562 int mask = (1 << BP_L_ID(bp));
4563 int func = BP_FUNC(bp);
4564 int i;
4566 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4568 switch (mode) {
4569 case BNX2X_RX_MODE_NONE: /* no Rx */
4570 tstorm_mac_filter.ucast_drop_all = mask;
4571 tstorm_mac_filter.mcast_drop_all = mask;
4572 tstorm_mac_filter.bcast_drop_all = mask;
4573 break;
4574 case BNX2X_RX_MODE_NORMAL:
4575 tstorm_mac_filter.bcast_accept_all = mask;
4576 break;
4577 case BNX2X_RX_MODE_ALLMULTI:
4578 tstorm_mac_filter.mcast_accept_all = mask;
4579 tstorm_mac_filter.bcast_accept_all = mask;
4580 break;
4581 case BNX2X_RX_MODE_PROMISC:
4582 tstorm_mac_filter.ucast_accept_all = mask;
4583 tstorm_mac_filter.mcast_accept_all = mask;
4584 tstorm_mac_filter.bcast_accept_all = mask;
4585 break;
4586 default:
4587 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4588 break;
4589 }
4591 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4592 REG_WR(bp, BAR_TSTRORM_INTMEM +
4593 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4594 ((u32 *)&tstorm_mac_filter)[i]);
4596 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4597 ((u32 *)&tstorm_mac_filter)[i]); */
4598 }
4600 if (mode != BNX2X_RX_MODE_NONE)
4601 bnx2x_set_client_config(bp);
4602 }
4604 static void bnx2x_init_internal_common(struct bnx2x *bp)
4605 {
4606 int i;
4608 /* Zero this manually as its initialization is
4609 currently missing in the initTool */
4610 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4611 REG_WR(bp, BAR_USTRORM_INTMEM +
4612 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4613 }
4615 static void bnx2x_init_internal_port(struct bnx2x *bp)
4616 {
4617 int port = BP_PORT(bp);
4619 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4620 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 }
4625 static void bnx2x_init_internal_func(struct bnx2x *bp)
4626 {
4627 struct tstorm_eth_function_common_config tstorm_config = {0};
4628 struct stats_indication_flags stats_flags = {0};
4629 int port = BP_PORT(bp);
4630 int func = BP_FUNC(bp);
4631 int i;
4632 u16 max_agg_size;
4634 if (is_multi(bp)) {
4635 tstorm_config.config_flags = MULTI_FLAGS;
4636 tstorm_config.rss_result_mask = MULTI_MASK;
4637 }
4639 tstorm_config.leading_client_id = BP_L_ID(bp);
4641 REG_WR(bp, BAR_TSTRORM_INTMEM +
4642 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4643 (*(u32 *)&tstorm_config));
4645 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4646 bnx2x_set_storm_rx_mode(bp);
4648 /* reset xstorm per client statistics */
4649 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4650 REG_WR(bp, BAR_XSTRORM_INTMEM +
4651 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4652 i*4, 0);
4653 }
4654 /* reset tstorm per client statistics */
4655 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4656 REG_WR(bp, BAR_TSTRORM_INTMEM +
4657 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4658 i*4, 0);
4659 }
4661 /* Init statistics related context */
4662 stats_flags.collect_eth = 1;
4664 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4665 ((u32 *)&stats_flags)[0]);
4666 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4667 ((u32 *)&stats_flags)[1]);
4669 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4670 ((u32 *)&stats_flags)[0]);
4671 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4672 ((u32 *)&stats_flags)[1]);
4674 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4675 ((u32 *)&stats_flags)[0]);
4676 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4677 ((u32 *)&stats_flags)[1]);
4679 REG_WR(bp, BAR_XSTRORM_INTMEM +
4680 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682 REG_WR(bp, BAR_XSTRORM_INTMEM +
4683 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4686 REG_WR(bp, BAR_TSTRORM_INTMEM +
4687 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4688 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4689 REG_WR(bp, BAR_TSTRORM_INTMEM +
4690 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4691 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4693 if (CHIP_IS_E1H(bp)) {
4694 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4695 IS_E1HMF(bp));
4696 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4697 IS_E1HMF(bp));
4698 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4699 IS_E1HMF(bp));
4700 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4701 IS_E1HMF(bp));
4703 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4704 bp->e1hov);
4705 }
4707 /* Init CQ ring mapping and aggregation size */
4708 max_agg_size = min((u32)(bp->rx_buf_use_size +
4709 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4710 (u32)0xffff);
4711 for_each_queue(bp, i) {
4712 struct bnx2x_fastpath *fp = &bp->fp[i];
4714 REG_WR(bp, BAR_USTRORM_INTMEM +
4715 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4716 U64_LO(fp->rx_comp_mapping));
4717 REG_WR(bp, BAR_USTRORM_INTMEM +
4718 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4719 U64_HI(fp->rx_comp_mapping));
4721 REG_WR16(bp, BAR_USTRORM_INTMEM +
4722 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4723 max_agg_size);
4724 }
4725 }
4727 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4728 {
4729 switch (load_code) {
4730 case FW_MSG_CODE_DRV_LOAD_COMMON:
4731 bnx2x_init_internal_common(bp);
4732 /* no break */
4734 case FW_MSG_CODE_DRV_LOAD_PORT:
4735 bnx2x_init_internal_port(bp);
4736 /* no break */
4738 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4739 bnx2x_init_internal_func(bp);
4740 break;
4742 default:
4743 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4744 break;
4745 }
4746 }
4748 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4749 {
4750 int i;
4752 for_each_queue(bp, i) {
4753 struct bnx2x_fastpath *fp = &bp->fp[i];
4756 fp->state = BNX2X_FP_STATE_CLOSED;
4758 fp->cl_id = BP_L_ID(bp) + i;
4759 fp->sb_id = fp->cl_id;
4761 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4762 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4763 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4764 fp->status_blk_mapping);
4765 }
4767 bnx2x_init_def_sb(bp, bp->def_status_blk,
4768 bp->def_status_blk_mapping, DEF_SB_ID);
4769 bnx2x_update_coalesce(bp);
4770 bnx2x_init_rx_rings(bp);
4771 bnx2x_init_tx_ring(bp);
4772 bnx2x_init_sp_ring(bp);
4773 bnx2x_init_context(bp);
4774 bnx2x_init_internal(bp, load_code);
4775 bnx2x_init_ind_table(bp);
4776 bnx2x_int_enable(bp);
4777 }
4779 /* end of nic init */
4781 /*
4782 * gzip service functions
4783 */
4785 static int bnx2x_gunzip_init(struct bnx2x *bp)
4786 {
4787 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4788 &bp->gunzip_mapping);
4789 if (bp->gunzip_buf == NULL)
4790 goto gunzip_nomem1;
4792 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4793 if (bp->strm == NULL)
4794 goto gunzip_nomem2;
4796 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4797 GFP_KERNEL);
4798 if (bp->strm->workspace == NULL)
4799 goto gunzip_nomem3;
4801 return 0;
4803 gunzip_nomem3:
4804 kfree(bp->strm);
4805 bp->strm = NULL;
4807 gunzip_nomem2:
4808 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4809 bp->gunzip_mapping);
4810 bp->gunzip_buf = NULL;
4812 gunzip_nomem1:
4813 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4814 " un-compression\n", bp->dev->name);
4815 return -ENOMEM;
4816 }
4818 static void bnx2x_gunzip_end(struct bnx2x *bp)
4819 {
4820 kfree(bp->strm->workspace);
4822 kfree(bp->strm);
4823 bp->strm = NULL;
4825 if (bp->gunzip_buf) {
4826 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4827 bp->gunzip_mapping);
4828 bp->gunzip_buf = NULL;
4832 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4833 {
4834 int n, rc;
4836 /* check gzip header */
4837 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4838 return -EINVAL;
4840 n = 10;
4842 #define FNAME 0x8
4844 if (zbuf[3] & FNAME)
4845 while ((zbuf[n++] != 0) && (n < len));
4847 bp->strm->next_in = zbuf + n;
4848 bp->strm->avail_in = len - n;
4849 bp->strm->next_out = bp->gunzip_buf;
4850 bp->strm->avail_out = FW_BUF_SIZE;
4852 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4853 if (rc != Z_OK)
4854 return rc;
4856 rc = zlib_inflate(bp->strm, Z_FINISH);
4857 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4858 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4859 bp->dev->name, bp->strm->msg);
4861 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4862 if (bp->gunzip_outlen & 0x3)
4863 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4864 " gunzip_outlen (%d) not aligned\n",
4865 bp->dev->name, bp->gunzip_outlen);
4866 bp->gunzip_outlen >>= 2;
4868 zlib_inflateEnd(bp->strm);
4870 if (rc == Z_STREAM_END)
4871 return 0;
4873 return rc;
4874 }
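/* For reference, the gzip member header checked above (RFC 1952):
 * byte 0 = 0x1f, byte 1 = 0x8b, byte 2 = compression method
 * (8 = deflate), byte 3 = flags, where FNAME (0x8) indicates a
 * NUL-terminated file name following the fixed 10-byte header.
 * The raw deflate payload is then inflated with -MAX_WBITS, i.e.
 * without expecting a zlib wrapper.
 */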
4876 /* nic load/unload */
4878 /*
4879 * General service functions
4880 */
4882 /* send a NIG loopback debug packet */
4883 static void bnx2x_lb_pckt(struct bnx2x *bp)
4884 {
4885 u32 wb_write[3];
4887 /* Ethernet source and destination addresses */
4888 wb_write[0] = 0x55555555;
4889 wb_write[1] = 0x55555555;
4890 wb_write[2] = 0x20; /* SOP */
4891 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4893 /* NON-IP protocol */
4894 wb_write[0] = 0x09000000;
4895 wb_write[1] = 0x55555555;
4896 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4897 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4898 }
4900 /* some of the internal memories
4901 * are not directly readable from the driver
4902 * to test them we send debug packets
4903 */
4904 static int bnx2x_int_mem_test(struct bnx2x *bp)
4905 {
4906 int factor;
4907 int count, i;
4908 u32 val = 0;
4910 if (CHIP_REV_IS_FPGA(bp))
4911 factor = 120;
4912 else if (CHIP_REV_IS_EMUL(bp))
4913 factor = 200;
4914 else
4915 factor = 1;
4917 DP(NETIF_MSG_HW, "start part1\n");
4919 /* Disable inputs of parser neighbor blocks */
4920 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4921 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4922 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4923 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4925 /* Write 0 to parser credits for CFC search request */
4926 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4928 /* send Ethernet packet */
4929 bnx2x_lb_pckt(bp);
4931 /* TODO do i reset NIG statistic? */
4932 /* Wait until NIG register shows 1 packet of size 0x10 */
4933 count = 1000 * factor;
4934 while (count) {
4936 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4937 val = *bnx2x_sp(bp, wb_data[0]);
4938 if (val == 0x10)
4939 break;
4941 msleep(10);
4942 count--;
4943 }
4944 if (val != 0x10) {
4945 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4946 return -1;
4947 }
4949 /* Wait until PRS register shows 1 packet */
4950 count = 1000 * factor;
4951 while (count) {
4952 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4953 if (val == 1)
4954 break;
4956 msleep(10);
4957 count--;
4958 }
4959 if (val != 0x1) {
4960 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4961 return -2;
4962 }
4964 /* Reset and init BRB, PRS */
4965 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4966 msleep(50);
4967 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4968 msleep(50);
4969 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4970 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4972 DP(NETIF_MSG_HW, "part2\n");
4974 /* Disable inputs of parser neighbor blocks */
4975 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4976 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4977 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4978 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4980 /* Write 0 to parser credits for CFC search request */
4981 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4983 /* send 10 Ethernet packets */
4984 for (i = 0; i < 10; i++)
4985 bnx2x_lb_pckt(bp);
4987 /* Wait until NIG register shows 10 + 1
4988 packets of size 11*0x10 = 0xb0 */
4989 count = 1000 * factor;
4990 while (count) {
4992 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4993 val = *bnx2x_sp(bp, wb_data[0]);
4994 if (val == 0xb0)
4995 break;
4997 msleep(10);
4998 count--;
4999 }
5000 if (val != 0xb0) {
5001 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5002 return -3;
5003 }
5005 /* Wait until PRS register shows 2 packets */
5006 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5007 if (val != 2)
5008 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5010 /* Write 1 to parser credits for CFC search request */
5011 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5013 /* Wait until PRS register shows 3 packets */
5014 msleep(10 * factor);
5015 /* Wait until NIG register shows 1 packet of size 0x10 */
5016 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5017 if (val != 3)
5018 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5020 /* clear NIG EOP FIFO */
5021 for (i = 0; i < 11; i++)
5022 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5023 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5025 BNX2X_ERR("clear of NIG failed\n");
5029 /* Reset and init BRB, PRS, NIG */
5030 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5031 msleep(50);
5032 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5033 msleep(50);
5034 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5035 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5041 /* Enable inputs of parser neighbor blocks */
5042 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5043 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5044 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5045 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5047 DP(NETIF_MSG_HW, "done\n");
5052 static void enable_blocks_attention(struct bnx2x *bp)
5053 {
5054 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5055 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5056 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5057 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5058 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5059 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5060 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5061 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5062 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5063 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5064 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5065 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5066 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5067 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5068 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5069 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5070 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5071 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5072 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5073 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5074 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5075 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5076 if (CHIP_REV_IS_FPGA(bp))
5077 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5079 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5080 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5081 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5082 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5083 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5084 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5085 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5086 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5087 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5088 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5089 }
5092 static int bnx2x_init_common(struct bnx2x *bp)
5093 {
5094 u32 val, i;
5096 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5098 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5099 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5101 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5102 if (CHIP_IS_E1H(bp))
5103 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5105 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5106 msleep(30);
5107 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5109 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5110 if (CHIP_IS_E1(bp)) {
5111 /* enable HW interrupt from PXP on USDM overflow
5112 bit 16 on INT_MASK_0 */
5113 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5114 }
5116 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5118 #ifdef __BIG_ENDIAN
5120 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5121 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5122 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5123 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5124 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5125 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5127 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5128 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5129 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5130 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5131 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5132 #endif
5136 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5139 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5140 #ifdef BCM_ISCSI
5141 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5142 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5143 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5144 #endif
5146 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5147 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5149 /* let the HW do its magic ... */
5150 msleep(100);
5151 /* finish PXP init */
5152 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5153 if (val != 1) {
5154 BNX2X_ERR("PXP2 CFG failed\n");
5155 return -EBUSY;
5156 }
5157 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5158 if (val != 1) {
5159 BNX2X_ERR("PXP2 RD_INIT failed\n");
5160 return -EBUSY;
5161 }
5163 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5164 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5166 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5168 /* clean the DMAE memory */
5170 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5172 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5173 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5174 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5175 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5177 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5178 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5179 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5180 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5182 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5183 /* soft reset pulse */
5184 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5185 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5187 #ifdef BCM_ISCSI
5188 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5189 #endif
5191 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5192 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5193 if (!CHIP_REV_IS_SLOW(bp)) {
5194 /* enable hw interrupt from doorbell Q */
5195 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5196 }
5198 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5199 if (CHIP_REV_IS_SLOW(bp)) {
5200 /* fix for emulation and FPGA for no pause */
5201 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5202 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5203 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5204 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5205 }
5207 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5208 if (CHIP_IS_E1H(bp))
5209 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5211 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5212 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5213 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5214 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5216 if (CHIP_IS_E1H(bp)) {
5217 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5218 STORM_INTMEM_SIZE_E1H/2);
5219 bnx2x_init_fill(bp,
5220 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221 0, STORM_INTMEM_SIZE_E1H/2);
5222 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5223 STORM_INTMEM_SIZE_E1H/2);
5224 bnx2x_init_fill(bp,
5225 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226 0, STORM_INTMEM_SIZE_E1H/2);
5227 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5228 STORM_INTMEM_SIZE_E1H/2);
5229 bnx2x_init_fill(bp,
5230 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231 0, STORM_INTMEM_SIZE_E1H/2);
5232 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5233 STORM_INTMEM_SIZE_E1H/2);
5234 bnx2x_init_fill(bp,
5235 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5236 0, STORM_INTMEM_SIZE_E1H/2);
5237 } else { /* E1 */
5238 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1);
5240 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5241 STORM_INTMEM_SIZE_E1);
5242 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5243 STORM_INTMEM_SIZE_E1);
5244 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1);
5246 }
5248 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5249 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5250 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5251 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5253 /* sync semi rtc */
5254 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5255 0x80000000);
5256 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5257 0x80000000);
5259 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5260 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5261 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5263 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5264 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5265 REG_WR(bp, i, 0xc0cac01a);
5266 /* TODO: replace with something meaningful */
5267 }
5268 if (CHIP_IS_E1H(bp))
5269 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5270 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5272 if (sizeof(union cdu_context) != 1024)
5273 /* we currently assume that a context is 1024 bytes */
5274 printk(KERN_ALERT PFX "please adjust the size of"
5275 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5277 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5278 val = (4 << 24) + (0 << 12) + 1024;
5279 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5280 if (CHIP_IS_E1(bp)) {
5281 /* !!! fix pxp client credit until excel update */
5282 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5283 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5284 }
5286 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5287 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5289 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5290 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5292 /* PXPCS COMMON comes here */
5293 /* Reset PCIE errors for debug */
5294 REG_WR(bp, 0x2814, 0xffffffff);
5295 REG_WR(bp, 0x3820, 0xffffffff);
5297 /* EMAC0 COMMON comes here */
5298 /* EMAC1 COMMON comes here */
5299 /* DBU COMMON comes here */
5300 /* DBG COMMON comes here */
5302 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5303 if (CHIP_IS_E1H(bp)) {
5304 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5305 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5306 }
5308 if (CHIP_REV_IS_SLOW(bp))
5309 msleep(200);
5311 /* finish CFC init */
5312 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5313 if (val != 1) {
5314 BNX2X_ERR("CFC LL_INIT failed\n");
5315 return -EBUSY;
5316 }
5317 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5318 if (val != 1) {
5319 BNX2X_ERR("CFC AC_INIT failed\n");
5320 return -EBUSY;
5321 }
5322 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5323 if (val != 1) {
5324 BNX2X_ERR("CFC CAM_INIT failed\n");
5325 return -EBUSY;
5326 }
5327 REG_WR(bp, CFC_REG_DEBUG0, 0);
5329 /* read NIG statistic
5330 to see if this is our first up since powerup */
5331 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5332 val = *bnx2x_sp(bp, wb_data[0]);
5334 /* do internal memory self test */
5335 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5336 BNX2X_ERR("internal mem self test failed\n");
5340 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5341 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5342 /* Fan failure is indicated by SPIO 5 */
5343 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5344 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5346 /* set to active low mode */
5347 val = REG_RD(bp, MISC_REG_SPIO_INT);
5348 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5349 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5350 REG_WR(bp, MISC_REG_SPIO_INT, val);
5352 /* enable interrupt to signal the IGU */
5353 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5354 val |= (1 << MISC_REGISTERS_SPIO_5);
5355 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5356 break;
5358 default:
5359 break;
5360 }
5362 /* clear PXP2 attentions */
5363 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5365 enable_blocks_attention(bp);
5367 if (bp->flags & TPA_ENABLE_FLAG) {
5368 struct tstorm_eth_tpa_exist tmp = {0};
5370 tmp.tpa_exist = 1;
5372 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5373 ((u32 *)&tmp)[0]);
5374 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5375 ((u32 *)&tmp)[1]);
5376 }
5378 return 0;
5379 }
5381 static int bnx2x_init_port(struct bnx2x *bp)
5382 {
5383 int port = BP_PORT(bp);
5384 u32 val;
5386 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5388 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5390 /* Port PXP comes here */
5391 /* Port PXP2 comes here */
5392 #ifdef BCM_ISCSI
5396 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5397 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5398 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5399 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5404 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5405 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5406 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5407 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5412 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5413 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5414 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5415 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5416 #endif
5417 /* Port CMs come here */
5419 /* Port QM comes here */
5420 #ifdef BCM_ISCSI
5421 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5422 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5424 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5425 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5426 #endif
5427 /* Port DQ comes here */
5428 /* Port BRB1 comes here */
5429 /* Port PRS comes here */
5430 /* Port TSDM comes here */
5431 /* Port CSDM comes here */
5432 /* Port USDM comes here */
5433 /* Port XSDM comes here */
5434 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5435 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5436 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5437 port ? USEM_PORT1_END : USEM_PORT0_END);
5438 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5439 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5440 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5441 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5442 /* Port UPB comes here */
5443 /* Port XPB comes here */
5445 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5446 port ? PBF_PORT1_END : PBF_PORT0_END);
5448 /* configure PBF to work without PAUSE mtu 9000 */
5449 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5451 /* update threshold */
5452 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5453 /* update init credit */
5454 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5457 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5458 msleep(5);
5459 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5461 #ifdef BCM_ISCSI
5462 /* tell the searcher where the T2 table is */
5463 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5465 wb_write[0] = U64_LO(bp->t2_mapping);
5466 wb_write[1] = U64_HI(bp->t2_mapping);
5467 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5468 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5469 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5470 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5472 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5473 /* Port SRCH comes here */
5474 #endif
5475 /* Port CDU comes here */
5476 /* Port CFC comes here */
5478 if (CHIP_IS_E1(bp)) {
5479 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5480 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5481 }
5482 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5483 port ? HC_PORT1_END : HC_PORT0_END);
5485 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5486 MISC_AEU_PORT0_START,
5487 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5488 /* init aeu_mask_attn_func_0/1:
5489 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5490 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5491 * bits 4-7 are used for "per vn group attention" */
5492 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5493 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5495 /* Port PXPCS comes here */
5496 /* Port EMAC0 comes here */
5497 /* Port EMAC1 comes here */
5498 /* Port DBU comes here */
5499 /* Port DBG comes here */
5500 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5501 port ? NIG_PORT1_END : NIG_PORT0_END);
5503 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5505 if (CHIP_IS_E1H(bp)) {
5506 u32 wsum;
5507 struct cmng_struct_per_port m_cmng_port;
5508 int vn;
5510 /* 0x2 disable e1hov, 0x1 enable */
5511 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5512 (IS_E1HMF(bp) ? 0x1 : 0x2));
5514 /* Init RATE SHAPING and FAIRNESS contexts.
5515 Initialize as if there is 10G link. */
5516 wsum = bnx2x_calc_vn_wsum(bp);
5517 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5519 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5520 bnx2x_init_vn_minmax(bp, 2*vn + port,
5521 wsum, 10000, &m_cmng_port);
5522 }
5524 /* Port MCP comes here */
5525 /* Port DMAE comes here */
5527 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5528 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5529 /* add SPIO 5 to group 0 */
5530 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5531 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5532 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5533 break;
5535 default:
5536 break;
5537 }
5539 bnx2x__link_reset(bp);
5541 return 0;
5542 }
5544 #define ILT_PER_FUNC (768/2)
5545 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5546 /* the phys address is shifted right 12 bits and a valid bit (1)
5547 is added at bit 53
5548 then since this is a wide register(TM)
5549 we split it into two 32 bit writes
5550 */
5551 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5552 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5553 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5554 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
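/* Worked example: for a DMA address of 0x0000001234567000,
 * ONCHIP_ADDR1 = 0x01234567 (bits 12-43) and ONCHIP_ADDR2 = 0x00100000
 * (the valid bit; bits 44+ are zero here). PXP_ONE_ILT(5) = 0x1405
 * encodes first == last == 5, and PXP_ILT_RANGE(f, l) packs the two
 * ILT line bounds the same way.
 */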
5556 #define CNIC_ILT_LINES 0
5558 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5559 {
5560 u32 reg;
5562 if (CHIP_IS_E1H(bp))
5563 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5565 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5567 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5570 static int bnx2x_init_func(struct bnx2x *bp)
5571 {
5572 int port = BP_PORT(bp);
5573 int func = BP_FUNC(bp);
5574 int i;
5576 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5578 i = FUNC_ILT_BASE(func);
5580 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5581 if (CHIP_IS_E1H(bp)) {
5582 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5583 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5584 } else /* E1 */
5585 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5586 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5589 if (CHIP_IS_E1H(bp)) {
5590 for (i = 0; i < 9; i++)
5591 bnx2x_init_block(bp,
5592 cm_start[func][i], cm_end[func][i]);
5594 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5595 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5596 }
5598 /* HC init per function */
5599 if (CHIP_IS_E1H(bp)) {
5600 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5602 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5603 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5604 }
5605 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5607 if (CHIP_IS_E1H(bp))
5608 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5610 /* Reset PCIE errors for debug */
5611 REG_WR(bp, 0x2114, 0xffffffff);
5612 REG_WR(bp, 0x2120, 0xffffffff);
5614 return 0;
5615 }
5617 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5618 {
5619 int i, rc = 0;
5621 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5622 BP_FUNC(bp), load_code);
5624 bp->dmae_ready = 0;
5625 mutex_init(&bp->dmae_mutex);
5626 bnx2x_gunzip_init(bp);
5628 switch (load_code) {
5629 case FW_MSG_CODE_DRV_LOAD_COMMON:
5630 rc = bnx2x_init_common(bp);
5631 if (rc)
5632 goto init_hw_err;
5633 /* no break */
5635 case FW_MSG_CODE_DRV_LOAD_PORT:
5636 bp->dmae_ready = 1;
5637 rc = bnx2x_init_port(bp);
5638 if (rc)
5639 goto init_hw_err;
5640 /* no break */
5642 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5643 bp->dmae_ready = 1;
5644 rc = bnx2x_init_func(bp);
5645 if (rc)
5646 goto init_hw_err;
5647 break;
5649 default:
5650 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5651 break;
5652 }
5654 if (!BP_NOMCP(bp)) {
5655 int func = BP_FUNC(bp);
5657 bp->fw_drv_pulse_wr_seq =
5658 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5659 DRV_PULSE_SEQ_MASK);
5660 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5661 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5662 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5663 }
5666 /* this needs to be done before gunzip end */
5667 bnx2x_zero_def_sb(bp);
5668 for_each_queue(bp, i)
5669 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5671 init_hw_err:
5672 bnx2x_gunzip_end(bp);
5674 return rc;
5675 }
5677 /* send the MCP a request, block until there is a reply */
5678 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5679 {
5680 int func = BP_FUNC(bp);
5681 u32 seq = ++bp->fw_seq;
5682 u32 rc = 0;
5683 u32 cnt = 1;
5684 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5686 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5687 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5689 do {
5690 /* let the FW do its magic ... */
5691 msleep(delay);
5693 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5695 /* Give the FW up to 2 second (200*10ms) */
5696 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5698 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5699 cnt*delay, rc, seq);
5701 /* is this a reply to our command? */
5702 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5703 rc &= FW_MSG_CODE_MASK;
5705 } else {
5706 /* FW BUG! */
5707 BNX2X_ERR("FW failed to respond!\n");
5708 bnx2x_fw_dump(bp);
5709 rc = 0;
5710 }
5712 return rc;
5713 }
5715 static void bnx2x_free_mem(struct bnx2x *bp)
5716 {
5718 #define BNX2X_PCI_FREE(x, y, size) \
5719 do { \
5720 if (x) { \
5721 pci_free_consistent(bp->pdev, size, x, y); \
5722 x = NULL; \
5723 } \
5724 } while (0)
5727 #define BNX2X_FREE(x) \
5728 do { \
5729 if (x) { \
5730 vfree(x); \
5731 x = NULL; \
5732 } \
5733 } while (0)
5735 int i;
5737 /* fastpath */
5738 for_each_queue(bp, i) {
5741 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5742 bnx2x_fp(bp, i, status_blk_mapping),
5743 sizeof(struct host_status_block) +
5744 sizeof(struct eth_tx_db_data));
5746 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5747 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5749 bnx2x_fp(bp, i, tx_desc_mapping),
5750 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5752 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5753 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5754 bnx2x_fp(bp, i, rx_desc_mapping),
5755 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5757 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5758 bnx2x_fp(bp, i, rx_comp_mapping),
5759 sizeof(struct eth_fast_path_rx_cqe) *
5760 NUM_RCQ_BD);
5762 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5764 bnx2x_fp(bp, i, rx_sge_mapping),
5765 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5766 }
5767 /* end of fastpath */
5769 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5770 sizeof(struct host_def_status_block));
5772 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5773 sizeof(struct bnx2x_slowpath));
5775 #ifdef BCM_ISCSI
5776 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5777 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5778 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5779 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5780 #endif
5781 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5783 #undef BNX2X_PCI_FREE
5784 #undef BNX2X_FREE
5785 }
5787 static int bnx2x_alloc_mem(struct bnx2x *bp)
5788 {
5790 #define BNX2X_PCI_ALLOC(x, y, size) \
5791 do { \
5792 x = pci_alloc_consistent(bp->pdev, size, y); \
5793 if (x == NULL) \
5794 goto alloc_mem_err; \
5795 memset(x, 0, size); \
5796 } while (0)
5798 #define BNX2X_ALLOC(x, size) \
5799 do { \
5800 x = vmalloc(size); \
5801 if (x == NULL) \
5802 goto alloc_mem_err; \
5803 memset(x, 0, size); \
5804 } while (0)
5806 int i;
5808 /* fastpath */
5809 for_each_queue(bp, i) {
5810 bnx2x_fp(bp, i, bp) = bp;
5813 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5814 &bnx2x_fp(bp, i, status_blk_mapping),
5815 sizeof(struct host_status_block) +
5816 sizeof(struct eth_tx_db_data));
5818 bnx2x_fp(bp, i, hw_tx_prods) =
5819 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5821 bnx2x_fp(bp, i, tx_prods_mapping) =
5822 bnx2x_fp(bp, i, status_blk_mapping) +
5823 sizeof(struct host_status_block);
5825 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5826 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5827 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5828 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5829 &bnx2x_fp(bp, i, tx_desc_mapping),
5830 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5832 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5833 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5834 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5835 &bnx2x_fp(bp, i, rx_desc_mapping),
5836 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5838 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5839 &bnx2x_fp(bp, i, rx_comp_mapping),
5840 sizeof(struct eth_fast_path_rx_cqe) *
5841 NUM_RCQ_BD);
5844 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5845 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5846 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5847 &bnx2x_fp(bp, i, rx_sge_mapping),
5848 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5849 }
5850 /* end of fastpath */
5852 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5853 sizeof(struct host_def_status_block));
5855 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5856 sizeof(struct bnx2x_slowpath));
5858 #ifdef BCM_ISCSI
5859 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5862 for (i = 0; i < 64*1024; i += 64) {
5863 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5864 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5865 }
5867 /* allocate searcher T2 table
5868 we allocate 1/4 of alloc num for T2
5869 (which is not entered into the ILT) */
5870 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5873 for (i = 0; i < 16*1024; i += 64)
5874 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5876 /* now fixup the last line in the block to point to the next block */
5877 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5879 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5880 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5882 /* QM queues (128*MAX_CONN) */
5883 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5884 #endif
5886 /* Slow path ring */
5887 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5889 return 0;
5891 alloc_mem_err:
5892 bnx2x_free_mem(bp);
5893 return -ENOMEM;
5895 #undef BNX2X_PCI_ALLOC
5896 #undef BNX2X_ALLOC
5897 }
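/* Both allocation macros above bail out to alloc_mem_err on failure,
 * so a single bnx2x_free_mem() call unwinds whatever subset was
 * already allocated; the free-side macros tolerate NULL pointers.
 */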
5899 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5900 {
5901 int i;
5903 for_each_queue(bp, i) {
5904 struct bnx2x_fastpath *fp = &bp->fp[i];
5906 u16 bd_cons = fp->tx_bd_cons;
5907 u16 sw_prod = fp->tx_pkt_prod;
5908 u16 sw_cons = fp->tx_pkt_cons;
5910 while (sw_cons != sw_prod) {
5911 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5912 sw_cons++;
5913 }
5914 }
5915 }
5917 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5918 {
5919 int i, j;
5921 for_each_queue(bp, j) {
5922 struct bnx2x_fastpath *fp = &bp->fp[j];
5924 for (i = 0; i < NUM_RX_BD; i++) {
5925 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5926 struct sk_buff *skb = rx_buf->skb;
5928 if (skb == NULL)
5929 continue;
5931 pci_unmap_single(bp->pdev,
5932 pci_unmap_addr(rx_buf, mapping),
5933 bp->rx_buf_use_size,
5934 PCI_DMA_FROMDEVICE);
5936 rx_buf->skb = NULL;
5937 dev_kfree_skb(skb);
5938 }
5939 if (!fp->disable_tpa)
5940 bnx2x_free_tpa_pool(bp, fp,
5941 ETH_MAX_AGGREGATION_QUEUES_E1H);
5942 }
5943 }
5945 static void bnx2x_free_skbs(struct bnx2x *bp)
5946 {
5947 bnx2x_free_tx_skbs(bp);
5948 bnx2x_free_rx_skbs(bp);
5949 }
5951 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5952 {
5953 int i, offset = 1;
5955 free_irq(bp->msix_table[0].vector, bp->dev);
5956 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5957 bp->msix_table[0].vector);
5959 for_each_queue(bp, i) {
5960 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5961 "state %x\n", i, bp->msix_table[i + offset].vector,
5962 bnx2x_fp(bp, i, state));
5964 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5965 BNX2X_ERR("IRQ of fp #%d being freed while "
5966 "state != closed\n", i);
5968 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5969 }
5970 }
5972 static void bnx2x_free_irq(struct bnx2x *bp)
5973 {
5974 if (bp->flags & USING_MSIX_FLAG) {
5975 bnx2x_free_msix_irqs(bp);
5976 pci_disable_msix(bp->pdev);
5977 bp->flags &= ~USING_MSIX_FLAG;
5979 } else
5980 free_irq(bp->pdev->irq, bp->dev);
5981 }
5983 static int bnx2x_enable_msix(struct bnx2x *bp)
5984 {
5985 int i, rc, offset = 0;
5987 bp->msix_table[0].entry = 0;
5988 offset = 1;
5989 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5991 for_each_queue(bp, i) {
5992 int igu_vec = offset + i + BP_L_ID(bp);
5994 bp->msix_table[i + offset].entry = igu_vec;
5995 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5996 "(fastpath #%u)\n", i + offset, igu_vec, i);
5999 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6000 bp->num_queues + offset);
6002 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6005 bp->flags |= USING_MSIX_FLAG;
6010 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6011 {
6012 int i, rc, offset = 1;
6014 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6015 bp->dev->name, bp->dev);
6017 BNX2X_ERR("request sp irq failed\n");
6021 for_each_queue(bp, i) {
6022 rc = request_irq(bp->msix_table[i + offset].vector,
6023 bnx2x_msix_fp_int, 0,
6024 bp->dev->name, &bp->fp[i]);
6026 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6028 bnx2x_free_msix_irqs(bp);
6032 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
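/* MSI-X vector layout assumed throughout: vector 0 serves the slowpath
 * (default) status block and fastpath queue i uses vector i + 1, which
 * is why every table walk above applies offset = 1.
 */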
6038 static int bnx2x_req_irq(struct bnx2x *bp)
6039 {
6040 int rc;
6042 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6043 bp->dev->name, bp->dev);
6044 if (!rc)
6045 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6047 return rc;
6048 }
6050 /*
6051 * Init service functions
6052 */
6054 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6055 {
6056 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6057 int port = BP_PORT(bp);
6059 /* CAM allocation
6060 * unicasts 0-31:port0 32-63:port1
6061 * multicast 64-127:port0 128-191:port1
6062 */
6063 config->hdr.length_6b = 2;
6064 config->hdr.offset = port ? 31 : 0;
6065 config->hdr.client_id = BP_CL_ID(bp);
6066 config->hdr.reserved1 = 0;
6069 config->config_table[0].cam_entry.msb_mac_addr =
6070 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6071 config->config_table[0].cam_entry.middle_mac_addr =
6072 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6073 config->config_table[0].cam_entry.lsb_mac_addr =
6074 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6075 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6076 config->config_table[0].target_table_entry.flags = 0;
6077 config->config_table[0].target_table_entry.client_id = 0;
6078 config->config_table[0].target_table_entry.vlan_id = 0;
6080 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6081 config->config_table[0].cam_entry.msb_mac_addr,
6082 config->config_table[0].cam_entry.middle_mac_addr,
6083 config->config_table[0].cam_entry.lsb_mac_addr);
6086 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6087 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6088 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6089 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6090 config->config_table[1].target_table_entry.flags =
6091 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6092 config->config_table[1].target_table_entry.client_id = 0;
6093 config->config_table[1].target_table_entry.vlan_id = 0;
6095 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6096 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6097 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6100 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6101 {
6102 struct mac_configuration_cmd_e1h *config =
6103 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6105 if (bp->state != BNX2X_STATE_OPEN) {
6106 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6110 /* CAM allocation for E1H
6111 * unicasts: by func number
6112 * multicast: 20+FUNC*20, 20 each
6113 */
6114 config->hdr.length_6b = 1;
6115 config->hdr.offset = BP_FUNC(bp);
6116 config->hdr.client_id = BP_CL_ID(bp);
6117 config->hdr.reserved1 = 0;
6120 config->config_table[0].msb_mac_addr =
6121 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6122 config->config_table[0].middle_mac_addr =
6123 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6124 config->config_table[0].lsb_mac_addr =
6125 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6126 config->config_table[0].client_id = BP_L_ID(bp);
6127 config->config_table[0].vlan_id = 0;
6128 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6129 config->config_table[0].flags = BP_PORT(bp);
6131 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6132 config->config_table[0].msb_mac_addr,
6133 config->config_table[0].middle_mac_addr,
6134 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6136 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6137 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6138 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6141 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6142 int *state_p, int poll)
6143 {
6144 /* can take a while if any port is running */
6145 int cnt = 500;
6147 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6148 poll ? "polling" : "waiting", state, idx);
6150 might_sleep();
6151 while (cnt--) {
6152 if (poll) {
6153 bnx2x_rx_int(bp->fp, 10);
6154 /* if index is different from 0
6155 * the reply for some commands will
6156 * be on the non-default queue
6157 */
6158 if (idx)
6159 bnx2x_rx_int(&bp->fp[idx], 10);
6160 }
6161 mb(); /* state is changed by bnx2x_sp_event() */
6163 if (*state_p == state)
6164 return 0;
6166 msleep(1);
6167 }
6169 /* timeout! */
6170 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6171 poll ? "polling" : "waiting", state, idx);
6172 #ifdef BNX2X_STOP_ON_ERROR
6173 bnx2x_panic();
6174 #endif
6176 return -EBUSY;
6177 }
6179 static int bnx2x_setup_leading(struct bnx2x *bp)
6180 {
6181 int rc;
6183 /* reset IGU state */
6184 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6186 /* SETUP ramrod */
6187 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6189 /* Wait for completion */
6190 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6192 return rc;
6193 }
6195 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6196 {
6197 /* reset IGU state */
6198 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6201 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6202 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6204 /* Wait for completion */
6205 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6206 &(bp->fp[index].state), 0);
6207 }
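/* Ramrod pattern used by the setup/teardown helpers: post the command
 * on the slowpath queue with bnx2x_sp_post(), then poll or sleep in
 * bnx2x_wait_ramrod() until the completion (seen by bnx2x_sp_event())
 * flips the tracked state variable.
 */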
6209 static int bnx2x_poll(struct napi_struct *napi, int budget);
6210 static void bnx2x_set_rx_mode(struct net_device *dev);
6212 /* must be called with rtnl_lock */
6213 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6214 {
6215 u32 load_code;
6216 int i, rc;
6218 #ifdef BNX2X_STOP_ON_ERROR
6219 if (unlikely(bp->panic))
6220 return -EPERM;
6221 #endif
6223 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6225 /* Send LOAD_REQUEST command to MCP
6226 Returns the type of LOAD command:
6227 if it is the first port to be initialized
6228 common blocks should be initialized, otherwise - not
6229 */
6230 if (!BP_NOMCP(bp)) {
6231 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6233 BNX2X_ERR("MCP response failure, unloading\n");
6236 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6237 return -EBUSY; /* other port in diagnostic mode */
6239 } else {
6240 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6241 load_count[0], load_count[1], load_count[2]);
6242 load_count[0]++;
6243 load_count[1 + BP_PORT(bp)]++;
6244 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6245 load_count[0], load_count[1], load_count[2]);
6246 if (load_count[0] == 1)
6247 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6248 else if (load_count[1 + BP_PORT(bp)] == 1)
6249 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6251 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6252 }
6254 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6255 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6256 bp->port.pmf = 1;
6257 else
6258 bp->port.pmf = 0;
6259 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6261 /* if we can't use MSI-X we only need one fp,
6262 * so try to enable MSI-X with the requested number of fp's
6263 * and fallback to inta with one fp
6264 */
6265 if (use_inta) {
6266 bp->num_queues = 1;
6268 } else {
6269 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6270 /* user requested number */
6271 bp->num_queues = use_multi;
6273 else if (use_multi)
6274 bp->num_queues = min_t(u32, num_online_cpus(),
6275 BP_MAX_QUEUES(bp));
6276 else
6277 bp->num_queues = 1;
6279 if (bnx2x_enable_msix(bp)) {
6280 /* failed to enable MSI-X */
6281 bp->num_queues = 1;
6282 if (use_multi)
6283 BNX2X_ERR("Multi requested but failed"
6284 " to enable MSI-X\n");
6285 }
6286 }
6287 DP(NETIF_MSG_IFUP,
6288 "set number of queues to %d\n", bp->num_queues);
6290 if (bnx2x_alloc_mem(bp))
6291 return -ENOMEM;
6293 for_each_queue(bp, i)
6294 bnx2x_fp(bp, i, disable_tpa) =
6295 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6297 /* Disable interrupt handling until HW is initialized */
6298 atomic_set(&bp->intr_sem, 1);
6300 if (bp->flags & USING_MSIX_FLAG) {
6301 rc = bnx2x_req_msix_irqs(bp);
6302 if (rc) {
6303 pci_disable_msix(bp->pdev);
6304 goto load_error;
6305 }
6306 } else {
6307 bnx2x_ack_int(bp);
6308 rc = bnx2x_req_irq(bp);
6309 if (rc) {
6310 BNX2X_ERR("IRQ request failed, aborting\n");
6311 goto load_error;
6312 }
6313 }
6315 for_each_queue(bp, i)
6316 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6317 bnx2x_poll, 128);
6320 rc = bnx2x_init_hw(bp, load_code);
6322 BNX2X_ERR("HW init failed, aborting\n");
6326 /* Enable interrupt handling */
6327 atomic_set(&bp->intr_sem, 0);
6329 /* Setup NIC internals and enable interrupts */
6330 bnx2x_nic_init(bp, load_code);
6332 /* Send LOAD_DONE command to MCP */
6333 if (!BP_NOMCP(bp)) {
6334 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6336 BNX2X_ERR("MCP response failure, unloading\n");
6338 goto load_int_disable;
6342 bnx2x_stats_init(bp);
6344 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6346 /* Enable Rx interrupt handling before sending the ramrod
6347 as it's completed on Rx FP queue */
6348 for_each_queue(bp, i)
6349 napi_enable(&bnx2x_fp(bp, i, napi));
6351 rc = bnx2x_setup_leading(bp);
6352 if (rc) {
6353 #ifdef BNX2X_STOP_ON_ERROR
6354 bp->panic = 1;
6355 #endif
6356 goto load_stop_netif;
6357 }
6359 if (CHIP_IS_E1H(bp))
6360 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6361 BNX2X_ERR("!!! mf_cfg function disabled\n");
6362 bp->state = BNX2X_STATE_DISABLED;
6363 }
6365 if (bp->state == BNX2X_STATE_OPEN)
6366 for_each_nondefault_queue(bp, i) {
6367 rc = bnx2x_setup_multi(bp, i);
6369 goto load_stop_netif;
6373 bnx2x_set_mac_addr_e1(bp);
6375 bnx2x_set_mac_addr_e1h(bp);
6378 bnx2x_initial_phy_init(bp);
6380 /* Start fast path */
6381 switch (load_mode) {
6383 /* Tx queue should only be re-enabled */
6384 netif_wake_queue(bp->dev);
6385 bnx2x_set_rx_mode(bp->dev);
6389 /* IRQ is only requested from bnx2x_open */
6390 netif_start_queue(bp->dev);
6391 bnx2x_set_rx_mode(bp->dev);
6392 if (bp->flags & USING_MSIX_FLAG)
6393 printk(KERN_INFO PFX "%s: using MSI-X\n",
6398 bnx2x_set_rx_mode(bp->dev);
6399 bp->state = BNX2X_STATE_DIAG;
6407 bnx2x__link_status_update(bp);
6409 /* start the timer */
6410 mod_timer(&bp->timer, jiffies + bp->current_interval);
6416 for_each_queue(bp, i)
6417 napi_disable(&bnx2x_fp(bp, i, napi));
6420 bnx2x_int_disable_sync(bp);
6425 /* Free SKBs, SGEs, TPA pool and driver internals */
6426 bnx2x_free_skbs(bp);
6427 for_each_queue(bp, i)
6428 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6429 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6433 /* TBD we really need to reset the chip
6434 if we want to recover from this */
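/* bnx2x_stop_multi - tear down one non-default fastpath connection:
 * send the HALT ramrod and wait for the HALTED state, then send
 * CFC_DEL and wait for the queue to reach the CLOSED state.
 */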
6438 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6442 /* halt the connection */
6443 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6444 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6446 /* Wait for completion */
6447 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6448 &(bp->fp[index].state), 1);
6449 if (rc) /* timeout */
6452 /* delete cfc entry */
6453 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6455 /* Wait for completion */
6456 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6457 &(bp->fp[index].state), 1);
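/* bnx2x_stop_leading - close the leading (default) connection.
 * HALT completes on the fastpath ring; PORT_DEL completion is
 * tracked by polling the default status block producer, since
 * the chip is about to be reset anyway.
 */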
6461 static void bnx2x_stop_leading(struct bnx2x *bp)
6463 u16 dsb_sp_prod_idx;
6464 /* if the other port is handling traffic,
6465 this can take a lot of time */
6471 /* Send HALT ramrod */
6472 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6473 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6475 /* Wait for completion */
6476 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6477 &(bp->fp[0].state), 1);
6478 if (rc) /* timeout */
6481 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6483 /* Send PORT_DELETE ramrod */
6484 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6486 /* Wait for completion to arrive on the default status block;
6487 we are going to reset the chip anyway,
6488 so there is not much to do if this times out
6490 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6493 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6494 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6495 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6496 #ifdef BNX2X_STOP_ON_ERROR
6503 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6504 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
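/* bnx2x_reset_func - quiesce per-function resources: clear the HC
 * leading/trailing edge registers and zero this function's ILT range.
 */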
6507 static void bnx2x_reset_func(struct bnx2x *bp)
6509 int port = BP_PORT(bp);
6510 int func = BP_FUNC(bp);
6514 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6515 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6517 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6520 base = FUNC_ILT_BASE(func);
6521 for (i = base; i < base + ILT_PER_FUNC; i++)
6522 bnx2x_ilt_wr(bp, i, 0);
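/* bnx2x_reset_port - stop traffic into this port: mask NIG
 * interrupts, block Rx traffic from reaching the BRB (except for
 * MCP-bound packets), mask AEU attentions and warn if BRB blocks
 * are still occupied.
 */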
6525 static void bnx2x_reset_port(struct bnx2x *bp)
6527 int port = BP_PORT(bp);
6530 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6532 /* Do not rcv packets to BRB */
6533 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6534 /* Do not direct rcv packets that are not for MCP to the BRB */
6535 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6536 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6539 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6542 /* Check for BRB port occupancy */
6543 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6545 DP(NETIF_MSG_IFDOWN,
6546 "BRB1 is not empty %d blooks are occupied\n", val);
6548 /* TODO: Close Doorbell port? */
6551 static void bnx2x_reset_common(struct bnx2x *bp)
6554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6556 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
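/* bnx2x_reset_chip - apply the reset scope requested by the MCP:
 * COMMON resets port, function and common blocks; PORT resets the
 * port and function; FUNCTION resets only the function.
 */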
6559 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6561 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6562 BP_FUNC(bp), reset_code);
6564 switch (reset_code) {
6565 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6566 bnx2x_reset_port(bp);
6567 bnx2x_reset_func(bp);
6568 bnx2x_reset_common(bp);
6571 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6572 bnx2x_reset_port(bp);
6573 bnx2x_reset_func(bp);
6576 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6577 bnx2x_reset_func(bp);
6581 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6586 /* must be called with rtnl_lock */
6587 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6592 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6594 bp->rx_mode = BNX2X_RX_MODE_NONE;
6595 bnx2x_set_storm_rx_mode(bp);
6597 if (netif_running(bp->dev)) {
6598 netif_tx_disable(bp->dev);
6599 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6602 del_timer_sync(&bp->timer);
6603 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6604 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6605 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6607 /* Wait until all fast path tasks complete */
6608 for_each_queue(bp, i) {
6609 struct bnx2x_fastpath *fp = &bp->fp[i];
6611 #ifdef BNX2X_STOP_ON_ERROR
6612 #ifdef __powerpc64__
6613 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6615 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6617 fp->tpa_queue_used);
6621 while (bnx2x_has_work(fp)) {
6624 BNX2X_ERR("timeout waiting for queue[%d]\n",
6626 #ifdef BNX2X_STOP_ON_ERROR
6638 /* Wait until all slow path tasks complete */
6640 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6643 for_each_queue(bp, i)
6644 napi_disable(&bnx2x_fp(bp, i, napi));
6645 /* Disable interrupts after Tx and Rx are disabled on stack level */
6646 bnx2x_int_disable_sync(bp);
6651 if (bp->flags & NO_WOL_FLAG)
6652 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6655 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6656 u8 *mac_addr = bp->dev->dev_addr;
6659 /* The MAC address is written to entries 1-4 to
6660 preserve entry 0, which is used by the PMF */
6661 val = (mac_addr[0] << 8) | mac_addr[1];
6662 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
6664 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6665 (mac_addr[4] << 8) | mac_addr[5];
6666 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6669 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6672 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6674 /* Close multi and leading connections
6675 Completions for ramrods are collected in a synchronous way */
6676 for_each_nondefault_queue(bp, i)
6677 if (bnx2x_stop_multi(bp, i))
6680 if (CHIP_IS_E1H(bp))
6681 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6683 bnx2x_stop_leading(bp);
6684 #ifdef BNX2X_STOP_ON_ERROR
6685 /* If ramrod completion timed out - break here! */
6687 BNX2X_ERR("Stop leading failed!\n");
6692 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6693 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6694 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6695 "state 0x%x fp[0].state 0x%x\n",
6696 bp->state, bp->fp[0].state);
6701 reset_code = bnx2x_fw_command(bp, reset_code);
6703 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6704 load_count[0], load_count[1], load_count[2]);
6706 load_count[1 + BP_PORT(bp)]--;
6707 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6708 load_count[0], load_count[1], load_count[2]);
6709 if (load_count[0] == 0)
6710 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6711 else if (load_count[1 + BP_PORT(bp)] == 0)
6712 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6714 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6717 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6718 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6719 bnx2x__link_reset(bp);
6721 /* Reset the chip */
6722 bnx2x_reset_chip(bp, reset_code);
6724 /* Report UNLOAD_DONE to MCP */
6726 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6728 /* Free SKBs, SGEs, TPA pool and driver internals */
6729 bnx2x_free_skbs(bp);
6730 for_each_queue(bp, i)
6731 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6732 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6735 bp->state = BNX2X_STATE_CLOSED;
6737 netif_carrier_off(bp->dev);
6742 static void bnx2x_reset_task(struct work_struct *work)
6744 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6746 #ifdef BNX2X_STOP_ON_ERROR
6747 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6748 " so reset not done to allow debug dump,\n"
6749 KERN_ERR " you will need to reboot when done\n");
6755 if (!netif_running(bp->dev))
6756 goto reset_task_exit;
6758 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6759 bnx2x_nic_load(bp, LOAD_NORMAL);
6765 /* end of nic load/unload */
6770 * Init service functions
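/* bnx2x_undi_unload - if a pre-boot UNDI driver left the device
 * initialized (normal-bell CID offset still 0x7), run the MCP
 * unload handshake for this port (and the other port if needed),
 * reset the chip and restore our function's fw_seq.
 */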
6773 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6777 /* Check if there is any driver already loaded */
6778 val = REG_RD(bp, MISC_REG_UNPREPARED);
6780 /* Check if it is the UNDI driver
6781 * UNDI driver initializes CID offset for normal bell to 0x7
6783 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6785 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6786 /* save our func and fw_seq */
6787 int func = BP_FUNC(bp);
6788 u16 fw_seq = bp->fw_seq;
6790 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6792 /* try unload UNDI on port 0 */
6794 bp->fw_seq = (SHMEM_RD(bp,
6795 func_mb[bp->func].drv_mb_header) &
6796 DRV_MSG_SEQ_NUMBER_MASK);
6798 reset_code = bnx2x_fw_command(bp, reset_code);
6799 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6801 /* if UNDI is loaded on the other port */
6802 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6805 bp->fw_seq = (SHMEM_RD(bp,
6806 func_mb[bp->func].drv_mb_header) &
6807 DRV_MSG_SEQ_NUMBER_MASK);
6809 bnx2x_fw_command(bp,
6810 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6811 bnx2x_fw_command(bp,
6812 DRV_MSG_CODE_UNLOAD_DONE);
6814 /* restore our func and fw_seq */
6816 bp->fw_seq = fw_seq;
6821 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6824 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
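/* bnx2x_get_common_hwinfo - read the chip id/revision, flash size
 * and shmem base; validate the MCP signature and cache hw_config,
 * board id and bootcode version from shared memory.
 */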
6830 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6832 u32 val, val2, val3, val4, id;
6834 /* Get the chip revision id and number. */
6835 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6836 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6837 id = ((val & 0xffff) << 16);
6838 val = REG_RD(bp, MISC_REG_CHIP_REV);
6839 id |= ((val & 0xf) << 12);
6840 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6841 id |= ((val & 0xff) << 4);
6842 REG_RD(bp, MISC_REG_BOND_ID);
6844 bp->common.chip_id = id;
6845 bp->link_params.chip_id = bp->common.chip_id;
6846 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6848 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6849 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6850 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6851 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6852 bp->common.flash_size, bp->common.flash_size);
6854 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6855 bp->link_params.shmem_base = bp->common.shmem_base;
6856 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6858 if (!bp->common.shmem_base ||
6859 (bp->common.shmem_base < 0xA0000) ||
6860 (bp->common.shmem_base >= 0xC0000)) {
6861 BNX2X_DEV_INFO("MCP not active\n");
6862 bp->flags |= NO_MCP_FLAG;
6866 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6867 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6868 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6869 BNX2X_ERR("BAD MCP validity signature\n");
6871 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6872 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6874 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6875 bp->common.hw_config, bp->common.board);
6877 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6878 SHARED_HW_CFG_LED_MODE_MASK) >>
6879 SHARED_HW_CFG_LED_MODE_SHIFT);
6881 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6882 bp->common.bc_ver = val;
6883 BNX2X_DEV_INFO("bc_ver %X\n", val);
6884 if (val < BNX2X_BC_VER) {
6885 /* for now only warn;
6886 * later we might need to enforce this */
6887 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6888 " please upgrade BC\n", BNX2X_BC_VER, val);
6890 BNX2X_DEV_INFO("%sWoL Capable\n",
6891 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6893 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6894 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6895 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6896 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6898 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6899 val, val2, val3, val4);
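/* bnx2x_link_settings_supported - derive the supported link modes
 * from the switch configuration (1G SerDes vs 10G XGXS) and the
 * external PHY type, then mask them with speed_cap_mask from NVRAM.
 */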
6902 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6905 int port = BP_PORT(bp);
6908 switch (switch_cfg) {
6910 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6913 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6914 switch (ext_phy_type) {
6915 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6916 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6919 bp->port.supported |= (SUPPORTED_10baseT_Half |
6920 SUPPORTED_10baseT_Full |
6921 SUPPORTED_100baseT_Half |
6922 SUPPORTED_100baseT_Full |
6923 SUPPORTED_1000baseT_Full |
6924 SUPPORTED_2500baseX_Full |
6929 SUPPORTED_Asym_Pause);
6932 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6933 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6936 bp->port.supported |= (SUPPORTED_10baseT_Half |
6937 SUPPORTED_10baseT_Full |
6938 SUPPORTED_100baseT_Half |
6939 SUPPORTED_100baseT_Full |
6940 SUPPORTED_1000baseT_Full |
6945 SUPPORTED_Asym_Pause);
6949 BNX2X_ERR("NVRAM config error. "
6950 "BAD SerDes ext_phy_config 0x%x\n",
6951 bp->link_params.ext_phy_config);
6955 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6957 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6960 case SWITCH_CFG_10G:
6961 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6964 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6965 switch (ext_phy_type) {
6966 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6967 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6970 bp->port.supported |= (SUPPORTED_10baseT_Half |
6971 SUPPORTED_10baseT_Full |
6972 SUPPORTED_100baseT_Half |
6973 SUPPORTED_100baseT_Full |
6974 SUPPORTED_1000baseT_Full |
6975 SUPPORTED_2500baseX_Full |
6976 SUPPORTED_10000baseT_Full |
6981 SUPPORTED_Asym_Pause);
6984 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6985 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6988 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6991 SUPPORTED_Asym_Pause);
6994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6995 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6998 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6999 SUPPORTED_1000baseT_Full |
7002 SUPPORTED_Asym_Pause);
7005 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7006 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7009 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7010 SUPPORTED_1000baseT_Full |
7014 SUPPORTED_Asym_Pause);
7017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7018 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7021 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7022 SUPPORTED_2500baseX_Full |
7023 SUPPORTED_1000baseT_Full |
7027 SUPPORTED_Asym_Pause);
7030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7031 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7034 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7038 SUPPORTED_Asym_Pause);
7041 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7042 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7043 bp->link_params.ext_phy_config);
7047 BNX2X_ERR("NVRAM config error. "
7048 "BAD XGXS ext_phy_config 0x%x\n",
7049 bp->link_params.ext_phy_config);
7053 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7055 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7060 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7061 bp->port.link_config);
7064 bp->link_params.phy_addr = bp->port.phy_addr;
7066 /* mask what we support according to speed_cap_mask */
7067 if (!(bp->link_params.speed_cap_mask &
7068 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7069 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7071 if (!(bp->link_params.speed_cap_mask &
7072 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7073 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7075 if (!(bp->link_params.speed_cap_mask &
7076 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7077 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7079 if (!(bp->link_params.speed_cap_mask &
7080 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7081 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7083 if (!(bp->link_params.speed_cap_mask &
7084 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7085 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7086 SUPPORTED_1000baseT_Full);
7088 if (!(bp->link_params.speed_cap_mask &
7089 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7090 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7092 if (!(bp->link_params.speed_cap_mask &
7093 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7094 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7096 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
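/* bnx2x_link_settings_requested - translate the NVRAM link_config
 * into the requested speed, duplex and flow control, falling back
 * to autoneg with full advertising on an invalid configuration.
 */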
7099 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7101 bp->link_params.req_duplex = DUPLEX_FULL;
7103 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7104 case PORT_FEATURE_LINK_SPEED_AUTO:
7105 if (bp->port.supported & SUPPORTED_Autoneg) {
7106 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7107 bp->port.advertising = bp->port.supported;
7110 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7112 if ((ext_phy_type ==
7113 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7115 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7116 /* force 10G, no AN */
7117 bp->link_params.req_line_speed = SPEED_10000;
7118 bp->port.advertising =
7119 (ADVERTISED_10000baseT_Full |
7123 BNX2X_ERR("NVRAM config error. "
7124 "Invalid link_config 0x%x"
7125 " Autoneg not supported\n",
7126 bp->port.link_config);
7131 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7132 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7133 bp->link_params.req_line_speed = SPEED_10;
7134 bp->port.advertising = (ADVERTISED_10baseT_Full |
7137 BNX2X_ERR("NVRAM config error. "
7138 "Invalid link_config 0x%x"
7139 " speed_cap_mask 0x%x\n",
7140 bp->port.link_config,
7141 bp->link_params.speed_cap_mask);
7146 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7147 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7148 bp->link_params.req_line_speed = SPEED_10;
7149 bp->link_params.req_duplex = DUPLEX_HALF;
7150 bp->port.advertising = (ADVERTISED_10baseT_Half |
7153 BNX2X_ERR("NVRAM config error. "
7154 "Invalid link_config 0x%x"
7155 " speed_cap_mask 0x%x\n",
7156 bp->port.link_config,
7157 bp->link_params.speed_cap_mask);
7162 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7163 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7164 bp->link_params.req_line_speed = SPEED_100;
7165 bp->port.advertising = (ADVERTISED_100baseT_Full |
7168 BNX2X_ERR("NVRAM config error. "
7169 "Invalid link_config 0x%x"
7170 " speed_cap_mask 0x%x\n",
7171 bp->port.link_config,
7172 bp->link_params.speed_cap_mask);
7177 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7178 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7179 bp->link_params.req_line_speed = SPEED_100;
7180 bp->link_params.req_duplex = DUPLEX_HALF;
7181 bp->port.advertising = (ADVERTISED_100baseT_Half |
7184 BNX2X_ERR("NVRAM config error. "
7185 "Invalid link_config 0x%x"
7186 " speed_cap_mask 0x%x\n",
7187 bp->port.link_config,
7188 bp->link_params.speed_cap_mask);
7193 case PORT_FEATURE_LINK_SPEED_1G:
7194 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7195 bp->link_params.req_line_speed = SPEED_1000;
7196 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7199 BNX2X_ERR("NVRAM config error. "
7200 "Invalid link_config 0x%x"
7201 " speed_cap_mask 0x%x\n",
7202 bp->port.link_config,
7203 bp->link_params.speed_cap_mask);
7208 case PORT_FEATURE_LINK_SPEED_2_5G:
7209 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7210 bp->link_params.req_line_speed = SPEED_2500;
7211 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7214 BNX2X_ERR("NVRAM config error. "
7215 "Invalid link_config 0x%x"
7216 " speed_cap_mask 0x%x\n",
7217 bp->port.link_config,
7218 bp->link_params.speed_cap_mask);
7223 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7224 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7225 case PORT_FEATURE_LINK_SPEED_10G_KR:
7226 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7227 bp->link_params.req_line_speed = SPEED_10000;
7228 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7231 BNX2X_ERR("NVRAM config error. "
7232 "Invalid link_config 0x%x"
7233 " speed_cap_mask 0x%x\n",
7234 bp->port.link_config,
7235 bp->link_params.speed_cap_mask);
7241 BNX2X_ERR("NVRAM config error. "
7242 "BAD link speed link_config 0x%x\n",
7243 bp->port.link_config);
7244 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7245 bp->port.advertising = bp->port.supported;
7249 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7250 PORT_FEATURE_FLOW_CONTROL_MASK);
7251 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7252 !(bp->port.supported & SUPPORTED_Autoneg))
7253 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7255 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7256 " advertising 0x%x\n",
7257 bp->link_params.req_line_speed,
7258 bp->link_params.req_duplex,
7259 bp->link_params.req_flow_ctrl, bp->port.advertising);
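/* bnx2x_get_port_hwinfo - fill link_params from the per-port
 * shared-memory configuration and read the port MAC address.
 */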
7262 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7264 int port = BP_PORT(bp);
7267 bp->link_params.bp = bp;
7268 bp->link_params.port = port;
7270 bp->link_params.serdes_config =
7271 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7272 bp->link_params.lane_config =
7273 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7274 bp->link_params.ext_phy_config =
7276 dev_info.port_hw_config[port].external_phy_config);
7277 bp->link_params.speed_cap_mask =
7279 dev_info.port_hw_config[port].speed_capability_mask);
7281 bp->port.link_config =
7282 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7284 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7285 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7286 " link_config 0x%08x\n",
7287 bp->link_params.serdes_config,
7288 bp->link_params.lane_config,
7289 bp->link_params.ext_phy_config,
7290 bp->link_params.speed_cap_mask, bp->port.link_config);
7292 bp->link_params.switch_cfg = (bp->port.link_config &
7293 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7294 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7296 bnx2x_link_settings_requested(bp);
7298 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7299 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7300 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7301 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7302 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7303 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7304 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7305 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7306 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7307 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7310 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7312 int func = BP_FUNC(bp);
7316 bnx2x_get_common_hwinfo(bp);
7320 if (CHIP_IS_E1H(bp)) {
7322 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7325 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7326 FUNC_MF_CFG_E1HOV_TAG_MASK);
7327 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7331 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7333 func, bp->e1hov, bp->e1hov);
7335 BNX2X_DEV_INFO("Single function mode\n");
7337 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7338 " aborting\n", func);
7344 if (!BP_NOMCP(bp)) {
7345 bnx2x_get_port_hwinfo(bp);
7347 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7348 DRV_MSG_SEQ_NUMBER_MASK);
7349 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7353 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7354 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7355 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7356 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7357 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7358 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7359 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7360 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7361 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7362 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7363 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7365 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7373 /* only supposed to happen on emulation/FPGA */
7374 BNX2X_ERR("warning: random MAC workaround active\n");
7375 random_ether_addr(bp->dev->dev_addr);
7376 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
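/* bnx2x_init_bp - one-time SW init: locks and work items, HW info,
 * UNDI unload if a pre-boot driver was active, TPA/LRO defaults,
 * ring sizes and the periodic timer.
 */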
7382 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7384 int func = BP_FUNC(bp);
7387 mutex_init(&bp->port.phy_mutex);
7389 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7390 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7392 rc = bnx2x_get_hwinfo(bp);
7394 /* need to reset chip if undi was active */
7396 bnx2x_undi_unload(bp);
7398 if (CHIP_REV_IS_FPGA(bp))
7399 printk(KERN_ERR PFX "FPGA detected\n");
7401 if (BP_NOMCP(bp) && (func == 0))
7403 "MCP disabled, must load devices in order!\n");
7407 bp->flags &= ~TPA_ENABLE_FLAG;
7408 bp->dev->features &= ~NETIF_F_LRO;
7410 bp->flags |= TPA_ENABLE_FLAG;
7411 bp->dev->features |= NETIF_F_LRO;
7415 bp->tx_ring_size = MAX_TX_AVAIL;
7416 bp->rx_ring_size = MAX_RX_AVAIL;
7424 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7425 bp->current_interval = (poll ? poll : bp->timer_interval);
7427 init_timer(&bp->timer);
7428 bp->timer.expires = jiffies + bp->current_interval;
7429 bp->timer.data = (unsigned long) bp;
7430 bp->timer.function = bnx2x_timer;
7436 * ethtool service functions
7439 /* All ethtool functions called with rtnl_lock */
7441 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7443 struct bnx2x *bp = netdev_priv(dev);
7445 cmd->supported = bp->port.supported;
7446 cmd->advertising = bp->port.advertising;
7448 if (netif_carrier_ok(dev)) {
7449 cmd->speed = bp->link_vars.line_speed;
7450 cmd->duplex = bp->link_vars.duplex;
7452 cmd->speed = bp->link_params.req_line_speed;
7453 cmd->duplex = bp->link_params.req_duplex;
7458 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7459 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7460 if (vn_max_rate < cmd->speed)
7461 cmd->speed = vn_max_rate;
7464 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7466 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7468 switch (ext_phy_type) {
7469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7472 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7473 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7474 cmd->port = PORT_FIBRE;
7477 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7478 cmd->port = PORT_TP;
7481 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7482 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7483 bp->link_params.ext_phy_config);
7487 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7488 bp->link_params.ext_phy_config);
7492 cmd->port = PORT_TP;
7494 cmd->phy_address = bp->port.phy_addr;
7495 cmd->transceiver = XCVR_INTERNAL;
7497 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7498 cmd->autoneg = AUTONEG_ENABLE;
7500 cmd->autoneg = AUTONEG_DISABLE;
7505 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7506 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7507 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7508 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7509 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7510 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7511 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7516 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7518 struct bnx2x *bp = netdev_priv(dev);
7524 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7525 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7526 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7527 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7528 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7529 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7530 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7532 if (cmd->autoneg == AUTONEG_ENABLE) {
7533 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7534 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7538 /* advertise the requested speed and duplex if supported */
7539 cmd->advertising &= bp->port.supported;
7541 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7542 bp->link_params.req_duplex = DUPLEX_FULL;
7543 bp->port.advertising |= (ADVERTISED_Autoneg |
7546 } else { /* forced speed */
7547 /* advertise the requested speed and duplex if supported */
7548 switch (cmd->speed) {
7550 if (cmd->duplex == DUPLEX_FULL) {
7551 if (!(bp->port.supported &
7552 SUPPORTED_10baseT_Full)) {
7554 "10M full not supported\n");
7558 advertising = (ADVERTISED_10baseT_Full |
7561 if (!(bp->port.supported &
7562 SUPPORTED_10baseT_Half)) {
7564 "10M half not supported\n");
7568 advertising = (ADVERTISED_10baseT_Half |
7574 if (cmd->duplex == DUPLEX_FULL) {
7575 if (!(bp->port.supported &
7576 SUPPORTED_100baseT_Full)) {
7578 "100M full not supported\n");
7582 advertising = (ADVERTISED_100baseT_Full |
7585 if (!(bp->port.supported &
7586 SUPPORTED_100baseT_Half)) {
7588 "100M half not supported\n");
7592 advertising = (ADVERTISED_100baseT_Half |
7598 if (cmd->duplex != DUPLEX_FULL) {
7599 DP(NETIF_MSG_LINK, "1G half not supported\n");
7603 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7604 DP(NETIF_MSG_LINK, "1G full not supported\n");
7608 advertising = (ADVERTISED_1000baseT_Full |
7613 if (cmd->duplex != DUPLEX_FULL) {
7615 "2.5G half not supported\n");
7619 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7621 "2.5G full not supported\n");
7625 advertising = (ADVERTISED_2500baseX_Full |
7630 if (cmd->duplex != DUPLEX_FULL) {
7631 DP(NETIF_MSG_LINK, "10G half not supported\n");
7635 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7636 DP(NETIF_MSG_LINK, "10G full not supported\n");
7640 advertising = (ADVERTISED_10000baseT_Full |
7645 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7649 bp->link_params.req_line_speed = cmd->speed;
7650 bp->link_params.req_duplex = cmd->duplex;
7651 bp->port.advertising = advertising;
7654 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7655 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7656 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7657 bp->port.advertising);
7659 if (netif_running(dev)) {
7660 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7667 #define PHY_FW_VER_LEN 10
7669 static void bnx2x_get_drvinfo(struct net_device *dev,
7670 struct ethtool_drvinfo *info)
7672 struct bnx2x *bp = netdev_priv(dev);
7673 char phy_fw_ver[PHY_FW_VER_LEN];
7675 strcpy(info->driver, DRV_MODULE_NAME);
7676 strcpy(info->version, DRV_MODULE_VERSION);
7678 phy_fw_ver[0] = '\0';
7680 bnx2x_phy_hw_lock(bp);
7681 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7682 (bp->state != BNX2X_STATE_CLOSED),
7683 phy_fw_ver, PHY_FW_VER_LEN);
7684 bnx2x_phy_hw_unlock(bp);
7687 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7688 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7689 BCM_5710_FW_REVISION_VERSION,
7690 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7691 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7692 strcpy(info->bus_info, pci_name(bp->pdev));
7693 info->n_stats = BNX2X_NUM_STATS;
7694 info->testinfo_len = BNX2X_NUM_TESTS;
7695 info->eedump_len = bp->common.flash_size;
7696 info->regdump_len = 0;
7699 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7701 struct bnx2x *bp = netdev_priv(dev);
7703 if (bp->flags & NO_WOL_FLAG) {
7707 wol->supported = WAKE_MAGIC;
7709 wol->wolopts = WAKE_MAGIC;
7713 memset(&wol->sopass, 0, sizeof(wol->sopass));
7716 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7718 struct bnx2x *bp = netdev_priv(dev);
7720 if (wol->wolopts & ~WAKE_MAGIC)
7723 if (wol->wolopts & WAKE_MAGIC) {
7724 if (bp->flags & NO_WOL_FLAG)
7734 static u32 bnx2x_get_msglevel(struct net_device *dev)
7736 struct bnx2x *bp = netdev_priv(dev);
7738 return bp->msglevel;
7741 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7743 struct bnx2x *bp = netdev_priv(dev);
7745 if (capable(CAP_NET_ADMIN))
7746 bp->msglevel = level;
7749 static int bnx2x_nway_reset(struct net_device *dev)
7751 struct bnx2x *bp = netdev_priv(dev);
7756 if (netif_running(dev)) {
7757 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7764 static int bnx2x_get_eeprom_len(struct net_device *dev)
7766 struct bnx2x *bp = netdev_priv(dev);
7768 return bp->common.flash_size;
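/* NVRAM access helpers: the flash is shared between both ports and
 * the MCP, so every access is bracketed by the SW arbitration lock
 * and the explicit access-enable bits.
 */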
7771 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7773 int port = BP_PORT(bp);
7777 /* adjust timeout for emulation/FPGA */
7778 count = NVRAM_TIMEOUT_COUNT;
7779 if (CHIP_REV_IS_SLOW(bp))
7782 /* request access to nvram interface */
7783 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7784 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7786 for (i = 0; i < count*10; i++) {
7787 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7788 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7794 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7795 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7802 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7804 int port = BP_PORT(bp);
7808 /* adjust timeout for emulation/FPGA */
7809 count = NVRAM_TIMEOUT_COUNT;
7810 if (CHIP_REV_IS_SLOW(bp))
7813 /* relinquish nvram interface */
7814 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7815 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7817 for (i = 0; i < count*10; i++) {
7818 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7819 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7825 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7826 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7833 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7837 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7839 /* enable both bits, even on read */
7840 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7841 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7842 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7845 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7849 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7851 /* disable both bits, even after read */
7852 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7853 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7854 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
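/* bnx2x_nvram_read_dword - issue a single read command and poll
 * MCPR_NVM_COMMAND_DONE; the data is converted to big-endian so
 * that ethtool sees a plain byte array.
 */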
7857 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7863 /* build the command word */
7864 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7866 /* need to clear DONE bit separately */
7867 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7869 /* address of the NVRAM to read from */
7870 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7871 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7873 /* issue a read command */
7874 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7876 /* adjust timeout for emulation/FPGA */
7877 count = NVRAM_TIMEOUT_COUNT;
7878 if (CHIP_REV_IS_SLOW(bp))
7881 /* wait for completion */
7884 for (i = 0; i < count; i++) {
7886 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7888 if (val & MCPR_NVM_COMMAND_DONE) {
7889 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7890 /* we read nvram data in cpu order
7891 * but ethtool sees it as an array of bytes
7892 * converting to big-endian will do the work */
7893 val = cpu_to_be32(val);
7903 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7910 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7912 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7917 if (offset + buf_size > bp->common.flash_size) {
7918 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7919 " buf_size (0x%x) > flash_size (0x%x)\n",
7920 offset, buf_size, bp->common.flash_size);
7924 /* request access to nvram interface */
7925 rc = bnx2x_acquire_nvram_lock(bp);
7929 /* enable access to nvram interface */
7930 bnx2x_enable_nvram_access(bp);
7932 /* read the first word(s) */
7933 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7934 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7935 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7936 memcpy(ret_buf, &val, 4);
7938 /* advance to the next dword */
7939 offset += sizeof(u32);
7940 ret_buf += sizeof(u32);
7941 buf_size -= sizeof(u32);
7946 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7947 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7948 memcpy(ret_buf, &val, 4);
7951 /* disable access to nvram interface */
7952 bnx2x_disable_nvram_access(bp);
7953 bnx2x_release_nvram_lock(bp);
7958 static int bnx2x_get_eeprom(struct net_device *dev,
7959 struct ethtool_eeprom *eeprom, u8 *eebuf)
7961 struct bnx2x *bp = netdev_priv(dev);
7964 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7965 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7966 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7967 eeprom->len, eeprom->len);
7969 /* parameters already validated in ethtool_get_eeprom */
7971 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7976 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7981 /* build the command word */
7982 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7984 /* need to clear DONE bit separately */
7985 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7987 /* write the data */
7988 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7990 /* address of the NVRAM to write to */
7991 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7992 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7994 /* issue the write command */
7995 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7997 /* adjust timeout for emulation/FPGA */
7998 count = NVRAM_TIMEOUT_COUNT;
7999 if (CHIP_REV_IS_SLOW(bp))
8002 /* wait for completion */
8004 for (i = 0; i < count; i++) {
8006 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8007 if (val & MCPR_NVM_COMMAND_DONE) {
8016 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8018 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8026 if (offset + buf_size > bp->common.flash_size) {
8027 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8028 " buf_size (0x%x) > flash_size (0x%x)\n",
8029 offset, buf_size, bp->common.flash_size);
8033 /* request access to nvram interface */
8034 rc = bnx2x_acquire_nvram_lock(bp);
8038 /* enable access to nvram interface */
8039 bnx2x_enable_nvram_access(bp);
8041 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8042 align_offset = (offset & ~0x03);
8043 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8046 val &= ~(0xff << BYTE_OFFSET(offset));
8047 val |= (*data_buf << BYTE_OFFSET(offset));
8049 /* nvram data is returned as an array of bytes
8050 * convert it back to cpu order */
8051 val = be32_to_cpu(val);
8053 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8057 /* disable access to nvram interface */
8058 bnx2x_disable_nvram_access(bp);
8059 bnx2x_release_nvram_lock(bp);
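/* bnx2x_nvram_write - dword-aligned buffer write; the FIRST/LAST
 * command flags are set on NVRAM page boundaries as the flash
 * interface requires.
 */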
8064 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8072 if (buf_size == 1) /* ethtool */
8073 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8075 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8077 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8082 if (offset + buf_size > bp->common.flash_size) {
8083 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8084 " buf_size (0x%x) > flash_size (0x%x)\n",
8085 offset, buf_size, bp->common.flash_size);
8089 /* request access to nvram interface */
8090 rc = bnx2x_acquire_nvram_lock(bp);
8094 /* enable access to nvram interface */
8095 bnx2x_enable_nvram_access(bp);
8098 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8099 while ((written_so_far < buf_size) && (rc == 0)) {
8100 if (written_so_far == (buf_size - sizeof(u32)))
8101 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8102 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8103 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8104 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8105 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8107 memcpy(&val, data_buf, 4);
8109 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8111 /* advance to the next dword */
8112 offset += sizeof(u32);
8113 data_buf += sizeof(u32);
8114 written_so_far += sizeof(u32);
8118 /* disable access to nvram interface */
8119 bnx2x_disable_nvram_access(bp);
8120 bnx2x_release_nvram_lock(bp);
8125 static int bnx2x_set_eeprom(struct net_device *dev,
8126 struct ethtool_eeprom *eeprom, u8 *eebuf)
8128 struct bnx2x *bp = netdev_priv(dev);
8131 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8132 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8133 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8134 eeprom->len, eeprom->len);
8136 /* parameters already validated in ethtool_set_eeprom */
8138 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8139 if (eeprom->magic == 0x00504859)
8142 bnx2x_phy_hw_lock(bp);
8143 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8144 bp->link_params.ext_phy_config,
8145 (bp->state != BNX2X_STATE_CLOSED),
8146 eebuf, eeprom->len);
8147 if ((bp->state == BNX2X_STATE_OPEN) ||
8148 (bp->state == BNX2X_STATE_DISABLED)) {
8149 rc |= bnx2x_link_reset(&bp->link_params,
8151 rc |= bnx2x_phy_init(&bp->link_params,
8154 bnx2x_phy_hw_unlock(bp);
8156 } else /* Only the PMF can access the PHY */
8159 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8164 static int bnx2x_get_coalesce(struct net_device *dev,
8165 struct ethtool_coalesce *coal)
8167 struct bnx2x *bp = netdev_priv(dev);
8169 memset(coal, 0, sizeof(struct ethtool_coalesce));
8171 coal->rx_coalesce_usecs = bp->rx_ticks;
8172 coal->tx_coalesce_usecs = bp->tx_ticks;
8177 static int bnx2x_set_coalesce(struct net_device *dev,
8178 struct ethtool_coalesce *coal)
8180 struct bnx2x *bp = netdev_priv(dev);
8182 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8183 if (bp->rx_ticks > 3000)
8184 bp->rx_ticks = 3000;
8186 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8187 if (bp->tx_ticks > 0x3000)
8188 bp->tx_ticks = 0x3000;
8190 if (netif_running(dev))
8191 bnx2x_update_coalesce(bp);
8196 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8198 struct bnx2x *bp = netdev_priv(dev);
8202 if (data & ETH_FLAG_LRO) {
8203 if (!(dev->features & NETIF_F_LRO)) {
8204 dev->features |= NETIF_F_LRO;
8205 bp->flags |= TPA_ENABLE_FLAG;
8209 } else if (dev->features & NETIF_F_LRO) {
8210 dev->features &= ~NETIF_F_LRO;
8211 bp->flags &= ~TPA_ENABLE_FLAG;
8215 if (changed && netif_running(dev)) {
8216 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8217 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8223 static void bnx2x_get_ringparam(struct net_device *dev,
8224 struct ethtool_ringparam *ering)
8226 struct bnx2x *bp = netdev_priv(dev);
8228 ering->rx_max_pending = MAX_RX_AVAIL;
8229 ering->rx_mini_max_pending = 0;
8230 ering->rx_jumbo_max_pending = 0;
8232 ering->rx_pending = bp->rx_ring_size;
8233 ering->rx_mini_pending = 0;
8234 ering->rx_jumbo_pending = 0;
8236 ering->tx_max_pending = MAX_TX_AVAIL;
8237 ering->tx_pending = bp->tx_ring_size;
8240 static int bnx2x_set_ringparam(struct net_device *dev,
8241 struct ethtool_ringparam *ering)
8243 struct bnx2x *bp = netdev_priv(dev);
8246 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8247 (ering->tx_pending > MAX_TX_AVAIL) ||
8248 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8251 bp->rx_ring_size = ering->rx_pending;
8252 bp->tx_ring_size = ering->tx_pending;
8254 if (netif_running(dev)) {
8255 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8256 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8262 static void bnx2x_get_pauseparam(struct net_device *dev,
8263 struct ethtool_pauseparam *epause)
8265 struct bnx2x *bp = netdev_priv(dev);
8267 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8268 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8270 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8272 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8275 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8276 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8277 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8280 static int bnx2x_set_pauseparam(struct net_device *dev,
8281 struct ethtool_pauseparam *epause)
8283 struct bnx2x *bp = netdev_priv(dev);
8288 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8289 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8290 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8292 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8294 if (epause->rx_pause)
8295 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8297 if (epause->tx_pause)
8298 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8300 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8301 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8303 if (epause->autoneg) {
8304 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8305 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8309 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8310 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8314 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8316 if (netif_running(dev)) {
8317 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8324 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8326 struct bnx2x *bp = netdev_priv(dev);
8331 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8333 struct bnx2x *bp = netdev_priv(dev);
8339 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8342 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8343 dev->features |= NETIF_F_TSO6;
8345 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8346 dev->features &= ~NETIF_F_TSO6;
8352 static const struct {
8353 char string[ETH_GSTRING_LEN];
8354 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8355 { "register_test (offline)" },
8356 { "memory_test (offline)" },
8357 { "loopback_test (offline)" },
8358 { "nvram_test (online)" },
8359 { "interrupt_test (online)" },
8360 { "link_test (online)" },
8361 { "idle check (online)" },
8362 { "MC errors (online)" }
8365 static int bnx2x_self_test_count(struct net_device *dev)
8367 return BNX2X_NUM_TESTS;
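/* bnx2x_test_registers - offline test: write 0 and then 0xffffffff
 * to a table of R/W registers and verify each masked read-back,
 * restoring the original value after every access.
 */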
8370 static int bnx2x_test_registers(struct bnx2x *bp)
8372 int idx, i, rc = -ENODEV;
8374 static const struct {
8379 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8380 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8381 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8382 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8383 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8384 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8385 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8386 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8387 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8388 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8389 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8390 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8391 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8392 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8393 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8394 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8395 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8396 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8397 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8398 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8399 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8400 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8401 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8402 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8403 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8404 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8405 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8406 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8407 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8408 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8409 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8410 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8411 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8412 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8413 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8414 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8415 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8416 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8418 { 0xffffffff, 0, 0x00000000 }
8421 if (!netif_running(bp->dev))
8424 /* Repeat the test twice:
8425 First by writing 0x00000000, second by writing 0xffffffff */
8426 for (idx = 0; idx < 2; idx++) {
8433 wr_val = 0xffffffff;
8437 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8438 u32 offset, mask, save_val, val;
8439 int port = BP_PORT(bp);
8441 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8442 mask = reg_tbl[i].mask;
8444 save_val = REG_RD(bp, offset);
8446 REG_WR(bp, offset, wr_val);
8447 val = REG_RD(bp, offset);
8449 /* Restore the original register's value */
8450 REG_WR(bp, offset, save_val);
8452 /* verify the value is as expected */
8453 if ((val & mask) != (wr_val & mask))
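/* bnx2x_test_memory - read through the internal memories listed
 * below, then check the parity status registers for new errors.
 */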
8464 static int bnx2x_test_memory(struct bnx2x *bp)
8466 int i, j, rc = -ENODEV;
8468 static const struct {
8472 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8473 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8474 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8475 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8476 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8477 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8478 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8482 static const struct {
8487 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8488 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8489 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8490 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8491 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8492 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8494 { NULL, 0xffffffff, 0 }
8497 if (!netif_running(bp->dev))
8500 /* Go through all the memories */
8501 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8502 for (j = 0; j < mem_tbl[i].size; j++)
8503 REG_RD(bp, mem_tbl[i].offset + j*4);
8505 /* Check the parity status */
8506 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8507 val = REG_RD(bp, prty_tbl[i].offset);
8508 if (val & ~(prty_tbl[i].mask)) {
8510 "%s is 0x%x\n", prty_tbl[i].name, val);
8521 static void bnx2x_netif_start(struct bnx2x *bp)
8525 if (atomic_dec_and_test(&bp->intr_sem)) {
8526 if (netif_running(bp->dev)) {
8527 bnx2x_int_enable(bp);
8528 for_each_queue(bp, i)
8529 napi_enable(&bnx2x_fp(bp, i, napi));
8530 if (bp->state == BNX2X_STATE_OPEN)
8531 netif_wake_queue(bp->dev);
8536 static void bnx2x_netif_stop(struct bnx2x *bp)
8540 if (netif_running(bp->dev)) {
8541 netif_tx_disable(bp->dev);
8542 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8543 for_each_queue(bp, i)
8544 napi_disable(&bnx2x_fp(bp, i, napi));
8546 bnx2x_int_disable_sync(bp);
8549 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8554 while (bnx2x_link_test(bp) && cnt--)
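/* bnx2x_run_loopback - send one self-addressed packet on fp[0] in
 * BMAC or XGXS loopback mode and verify that it comes back on the
 * Rx ring with the same length and payload.
 */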
8558 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8560 unsigned int pkt_size, num_pkts, i;
8561 struct sk_buff *skb;
8562 unsigned char *packet;
8563 struct bnx2x_fastpath *fp = &bp->fp[0];
8564 u16 tx_start_idx, tx_idx;
8565 u16 rx_start_idx, rx_idx;
8567 struct sw_tx_bd *tx_buf;
8568 struct eth_tx_bd *tx_bd;
8570 union eth_rx_cqe *cqe;
8572 struct sw_rx_bd *rx_buf;
8576 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8577 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8578 bnx2x_phy_hw_lock(bp);
8579 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8580 bnx2x_phy_hw_unlock(bp);
8582 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8583 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8584 bnx2x_phy_hw_lock(bp);
8585 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8586 bnx2x_phy_hw_unlock(bp);
8587 /* wait until link state is restored */
8588 bnx2x_wait_for_link(bp, link_up);
8594 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8597 goto test_loopback_exit;
8599 packet = skb_put(skb, pkt_size);
8600 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8601 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8602 for (i = ETH_HLEN; i < pkt_size; i++)
8603 packet[i] = (unsigned char) (i & 0xff);
8606 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8607 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8609 pkt_prod = fp->tx_pkt_prod++;
8610 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8611 tx_buf->first_bd = fp->tx_bd_prod;
8614 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8615 mapping = pci_map_single(bp->pdev, skb->data,
8616 skb_headlen(skb), PCI_DMA_TODEVICE);
8617 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8618 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8619 tx_bd->nbd = cpu_to_le16(1);
8620 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8621 tx_bd->vlan = cpu_to_le16(pkt_prod);
8622 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8623 ETH_TX_BD_FLAGS_END_BD);
8624 tx_bd->general_data = ((UNICAST_ADDRESS <<
8625 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8627 fp->hw_tx_prods->bds_prod =
8628 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8629 mb(); /* FW restriction: must not reorder writing nbd and packets */
8630 fp->hw_tx_prods->packets_prod =
8631 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8632 DOORBELL(bp, FP_IDX(fp), 0);
8638 bp->dev->trans_start = jiffies;
8642 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8643 if (tx_idx != tx_start_idx + num_pkts)
8644 goto test_loopback_exit;
8646 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8647 if (rx_idx != rx_start_idx + num_pkts)
8648 goto test_loopback_exit;
8650 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8651 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8652 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8653 goto test_loopback_rx_exit;
8655 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8656 if (len != pkt_size)
8657 goto test_loopback_rx_exit;
8659 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8661 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8662 for (i = ETH_HLEN; i < pkt_size; i++)
8663 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8664 goto test_loopback_rx_exit;
8668 test_loopback_rx_exit:
8669 bp->dev->last_rx = jiffies;
8671 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8672 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8673 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8674 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8676 /* Update producers */
8677 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8679 mmiowb(); /* keep prod updates ordered */
8682 bp->link_params.loopback_mode = LOOPBACK_NONE;
8687 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8691 if (!netif_running(bp->dev))
8692 return BNX2X_LOOPBACK_FAILED;
8694 bnx2x_netif_stop(bp);
8696 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8697 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8698 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8701 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8702 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8703 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8706 bnx2x_netif_start(bp);
8711 #define CRC32_RESIDUAL 0xdebb20e3
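/* bnx2x_test_nvram - verify the NVRAM magic value and check that
 * each region in nvram_tbl yields the expected CRC32 residual.
 */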
8713 static int bnx2x_test_nvram(struct bnx2x *bp)
8715 static const struct {
8719 { 0, 0x14 }, /* bootstrap */
8720 { 0x14, 0xec }, /* dir */
8721 { 0x100, 0x350 }, /* manuf_info */
8722 { 0x450, 0xf0 }, /* feature_info */
8723 { 0x640, 0x64 }, /* upgrade_key_info */
8725 { 0x708, 0x70 }, /* manuf_key_info */
8730 u8 *data = (u8 *)buf;
8734 rc = bnx2x_nvram_read(bp, 0, data, 4);
8736 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8737 goto test_nvram_exit;
8740 magic = be32_to_cpu(buf[0]);
8741 if (magic != 0x669955aa) {
8742 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8744 goto test_nvram_exit;
8747 for (i = 0; nvram_tbl[i].size; i++) {
8749 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8753 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8754 goto test_nvram_exit;
8757 csum = ether_crc_le(nvram_tbl[i].size, data);
8758 if (csum != CRC32_RESIDUAL) {
8760 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8762 goto test_nvram_exit;
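/* bnx2x_test_intr - post a SET_MAC ramrod with an empty MAC
 * configuration table and poll set_mac_pending to confirm that
 * slowpath completions (and thus interrupts) are working.
 */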
8770 static int bnx2x_test_intr(struct bnx2x *bp)
8772 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8775 if (!netif_running(bp->dev))
8778 config->hdr.length_6b = 0;
8779 config->hdr.offset = 0;
8780 config->hdr.client_id = BP_CL_ID(bp);
8781 config->hdr.reserved1 = 0;
8783 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8784 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8785 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8787 bp->set_mac_pending++;
8788 for (i = 0; i < 10; i++) {
8789 if (!bp->set_mac_pending)
8791 msleep_interruptible(10);
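/* bnx2x_self_test - ethtool self-test entry point; offline tests
 * reload the NIC in diagnostic mode around the register, memory
 * and loopback tests, then reload it normally.
 */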
8800 static void bnx2x_self_test(struct net_device *dev,
8801 struct ethtool_test *etest, u64 *buf)
8803 struct bnx2x *bp = netdev_priv(dev);
8805 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8807 if (!netif_running(dev))
8810 /* offline tests are not supported in MF mode */
8812 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8814 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8817 link_up = bp->link_vars.link_up;
8818 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8819 bnx2x_nic_load(bp, LOAD_DIAG);
8820 /* wait until link state is restored */
8821 bnx2x_wait_for_link(bp, link_up);
8823 if (bnx2x_test_registers(bp) != 0) {
8825 etest->flags |= ETH_TEST_FL_FAILED;
8827 if (bnx2x_test_memory(bp) != 0) {
8829 etest->flags |= ETH_TEST_FL_FAILED;
8831 buf[2] = bnx2x_test_loopback(bp, link_up);
8833 etest->flags |= ETH_TEST_FL_FAILED;
8835 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8836 bnx2x_nic_load(bp, LOAD_NORMAL);
8837 /* wait until link state is restored */
8838 bnx2x_wait_for_link(bp, link_up);
8840 if (bnx2x_test_nvram(bp) != 0) {
8842 etest->flags |= ETH_TEST_FL_FAILED;
8844 if (bnx2x_test_intr(bp) != 0) {
8846 etest->flags |= ETH_TEST_FL_FAILED;
8849 if (bnx2x_link_test(bp) != 0) {
8851 etest->flags |= ETH_TEST_FL_FAILED;
8853 buf[7] = bnx2x_mc_assert(bp);
8855 etest->flags |= ETH_TEST_FL_FAILED;
8857 #ifdef BNX2X_EXTRA_DEBUG
8858 bnx2x_panic_dump(bp);
8862 static const struct {
8866 #define STATS_FLAGS_PORT 1
8867 #define STATS_FLAGS_FUNC 2
8868 u8 string[ETH_GSTRING_LEN];
8869 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8870 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8871 8, STATS_FLAGS_FUNC, "rx_bytes" },
8872 { STATS_OFFSET32(error_bytes_received_hi),
8873 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8874 { STATS_OFFSET32(total_bytes_transmitted_hi),
8875 8, STATS_FLAGS_FUNC, "tx_bytes" },
8876 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8877 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8878 { STATS_OFFSET32(total_unicast_packets_received_hi),
8879 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8880 { STATS_OFFSET32(total_multicast_packets_received_hi),
8881 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8882 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8883 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8884 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8885 8, STATS_FLAGS_FUNC, "tx_packets" },
8886 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8887 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8888 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8889 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8890 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8891 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8892 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8893 8, STATS_FLAGS_PORT, "rx_align_errors" },
8894 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8895 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8896 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8897 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8898 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8899 8, STATS_FLAGS_PORT, "tx_deferred" },
8900 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8901 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8902 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8903 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8904 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8905 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8906 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8907 8, STATS_FLAGS_PORT, "rx_fragments" },
8908 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8909 8, STATS_FLAGS_PORT, "rx_jabbers" },
8910 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8911 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8912 { STATS_OFFSET32(jabber_packets_received),
8913 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8914 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8915 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8916 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8917 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8918 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8919 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8920 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8921 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8922 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8923 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8924 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8925 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8926 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8927 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8928 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8929 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8930 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8931 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8932 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8933 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8934 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8935 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8936 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8937 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8938 { STATS_OFFSET32(mac_filter_discard),
8939 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8940 { STATS_OFFSET32(no_buff_discard),
8941 4, STATS_FLAGS_FUNC, "rx_discards" },
8942 { STATS_OFFSET32(xxoverflow_discard),
8943 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8944 { STATS_OFFSET32(brb_drop_hi),
8945 8, STATS_FLAGS_PORT, "brb_discard" },
8946 { STATS_OFFSET32(brb_truncate_hi),
8947 8, STATS_FLAGS_PORT, "brb_truncate" },
8948 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8949 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8950 { STATS_OFFSET32(rx_skb_alloc_failed),
8951 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8952 /* 42 */{ STATS_OFFSET32(hw_csum_err),
8953 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8956 #define IS_NOT_E1HMF_STAT(bp, i) \
8957 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
8959 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8961 struct bnx2x *bp = netdev_priv(dev);
8964 switch (stringset) {
8966 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8967 if (IS_NOT_E1HMF_STAT(bp, i))
8969 strcpy(buf + j*ETH_GSTRING_LEN,
8970 bnx2x_stats_arr[i].string);
8976 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8981 static int bnx2x_get_stats_count(struct net_device *dev)
8983 struct bnx2x *bp = netdev_priv(dev);
8984 int i, num_stats = 0;
8986 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8987 if (IS_NOT_E1HMF_STAT(bp, i))
8994 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8995 struct ethtool_stats *stats, u64 *buf)
8997 struct bnx2x *bp = netdev_priv(dev);
8998 u32 *hw_stats = (u32 *)&bp->eth_stats;
9001 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9002 if (IS_NOT_E1HMF_STAT(bp, i))
9005 if (bnx2x_stats_arr[i].size == 0) {
9006 /* skip this counter */
9011 if (bnx2x_stats_arr[i].size == 4) {
9012 /* 4-byte counter */
9013 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9017 /* 8-byte counter */
9018 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9019 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
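/* Editor's note: every 8-byte statistic above is kept as two adjacent
 * 32-bit words, high word first, so the entry is rebuilt as
 * ((u64)hi << 32) + lo, which is what HILO_U64() is assumed to expand
 * to.  Minimal sketch of the hi/lo recombination:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define HILO_U64(hi, lo)	(((uint64_t)(hi) << 32) + (lo))

int main(void)
{
	uint32_t stats[2] = { 0x00000001, 0x23456789 };	/* hi, lo */

	printf("0x%016llx\n",
	       (unsigned long long)HILO_U64(stats[0], stats[1]));
	return 0;
}
#endif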
9024 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9026 struct bnx2x *bp = netdev_priv(dev);
9027 int port = BP_PORT(bp);
9030 if (!netif_running(dev))
9039 for (i = 0; i < (data * 2); i++) {
9041 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9042 bp->link_params.hw_led_mode,
9043 bp->link_params.chip_id);
9045 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9046 bp->link_params.hw_led_mode,
9047 bp->link_params.chip_id);
9049 msleep_interruptible(500);
9050 if (signal_pending(current))
9054 if (bp->link_vars.link_up)
9055 bnx2x_set_led(bp, port, LED_MODE_OPER,
9056 bp->link_vars.line_speed,
9057 bp->link_params.hw_led_mode,
9058 bp->link_params.chip_id);
9063 static struct ethtool_ops bnx2x_ethtool_ops = {
9064 .get_settings = bnx2x_get_settings,
9065 .set_settings = bnx2x_set_settings,
9066 .get_drvinfo = bnx2x_get_drvinfo,
9067 .get_wol = bnx2x_get_wol,
9068 .set_wol = bnx2x_set_wol,
9069 .get_msglevel = bnx2x_get_msglevel,
9070 .set_msglevel = bnx2x_set_msglevel,
9071 .nway_reset = bnx2x_nway_reset,
9072 .get_link = ethtool_op_get_link,
9073 .get_eeprom_len = bnx2x_get_eeprom_len,
9074 .get_eeprom = bnx2x_get_eeprom,
9075 .set_eeprom = bnx2x_set_eeprom,
9076 .get_coalesce = bnx2x_get_coalesce,
9077 .set_coalesce = bnx2x_set_coalesce,
9078 .get_ringparam = bnx2x_get_ringparam,
9079 .set_ringparam = bnx2x_set_ringparam,
9080 .get_pauseparam = bnx2x_get_pauseparam,
9081 .set_pauseparam = bnx2x_set_pauseparam,
9082 .get_rx_csum = bnx2x_get_rx_csum,
9083 .set_rx_csum = bnx2x_set_rx_csum,
9084 .get_tx_csum = ethtool_op_get_tx_csum,
9085 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9086 .set_flags = bnx2x_set_flags,
9087 .get_flags = ethtool_op_get_flags,
9088 .get_sg = ethtool_op_get_sg,
9089 .set_sg = ethtool_op_set_sg,
9090 .get_tso = ethtool_op_get_tso,
9091 .set_tso = bnx2x_set_tso,
9092 .self_test_count = bnx2x_self_test_count,
9093 .self_test = bnx2x_self_test,
9094 .get_strings = bnx2x_get_strings,
9095 .phys_id = bnx2x_phys_id,
9096 .get_stats_count = bnx2x_get_stats_count,
9097 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9100 /* end of ethtool_ops */
9102 /****************************************************************************
9103 * General service functions
9104 ****************************************************************************/
9106 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9110 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9114 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9115 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9116 PCI_PM_CTRL_PME_STATUS));
9118 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9119 /* delay required during transition out of D3hot */
9124 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9128 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9130 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9133 /* No more memory access after this point until
9134 * device is brought back to D0.
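/* Editor's note: the D-state change above is a read-modify-write of the
 * PMCSR register in the PCI power-management capability: clear the
 * PCI_PM_CTRL_STATE_MASK bits to select D0, or set them to 3 for D3hot,
 * OR-ing in PCI_PM_CTRL_PME_ENABLE when wake-up is wanted.  Sketch of
 * the bit manipulation only (no config-space access):
 */
#if 0
#include <stdint.h>

#define PM_CTRL_STATE_MASK	0x0003	/* as PCI_PM_CTRL_STATE_MASK */
#define PM_CTRL_PME_ENABLE	0x0100	/* as PCI_PM_CTRL_PME_ENABLE */

static uint16_t pmcsr_for_state(uint16_t pmcsr, int d_state, int enable_pme)
{
	pmcsr &= ~PM_CTRL_STATE_MASK;
	pmcsr |= (d_state & PM_CTRL_STATE_MASK);	/* 0 = D0, 3 = D3hot */
	if (enable_pme)
		pmcsr |= PM_CTRL_PME_ENABLE;
	return pmcsr;
}
#endif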
9145 * net_device service functions
9148 static int bnx2x_poll(struct napi_struct *napi, int budget)
9150 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9152 struct bnx2x *bp = fp->bp;
9155 #ifdef BNX2X_STOP_ON_ERROR
9156 if (unlikely(bp->panic))
9160 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9161 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9162 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9164 bnx2x_update_fpsb_idx(fp);
9166 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
9167 (fp->tx_pkt_prod != fp->tx_pkt_cons))
9168 bnx2x_tx_int(fp, budget);
9170 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9171 work_done = bnx2x_rx_int(fp, budget);
9173 rmb(); /* bnx2x_has_work() reads the status block */
9175 /* must not complete if we consumed full budget */
9176 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9178 #ifdef BNX2X_STOP_ON_ERROR
9181 netif_rx_complete(bp->dev, napi);
9183 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9184 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9185 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9186 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
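/* Editor's note: bnx2x_poll() follows the canonical NAPI contract: do at
 * most 'budget' worth of RX work, and only complete and re-enable
 * interrupts when work_done < budget AND no new work is pending (the
 * rmb() orders the status-block re-check).  Self-contained sketch of
 * that contract with the driver specifics abstracted into a fake queue:
 */
#if 0
#include <stdbool.h>

struct fake_queue {
	int pending;		/* packets waiting */
	bool irq_enabled;
};

/* returns how many packets were consumed, at most 'budget' */
static int napi_poll_skeleton(struct fake_queue *q, int budget)
{
	int work_done = (q->pending < budget) ? q->pending : budget;

	q->pending -= work_done;

	/* complete only when we did NOT exhaust the budget and no new
	 * work showed up in the meantime - otherwise stay on the list */
	if (work_done < budget && !q->pending)
		q->irq_enabled = true;	/* stands in for the IGU_INT_ENABLE ack */

	return work_done;
}
#endif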
9192 /* we split the first BD into headers and data BDs
9193 * to ease the pain of our fellow microcode engineers
9194 * we use one mapping for both BDs
9195 * So far this has only been observed to happen
9196 * in Other Operating Systems(TM)
9198 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9199 struct bnx2x_fastpath *fp,
9200 struct eth_tx_bd **tx_bd, u16 hlen,
9201 u16 bd_prod, int nbd)
9203 struct eth_tx_bd *h_tx_bd = *tx_bd;
9204 struct eth_tx_bd *d_tx_bd;
9206 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9208 /* first fix first BD */
9209 h_tx_bd->nbd = cpu_to_le16(nbd);
9210 h_tx_bd->nbytes = cpu_to_le16(hlen);
9212 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9213 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9214 h_tx_bd->addr_lo, h_tx_bd->nbd);
9216 /* now get a new data BD
9217 * (after the pbd) and fill it */
9218 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9219 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9221 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9222 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9224 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9225 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9226 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9228 /* this marks the BD as one that has no individual mapping
9229 * the FW ignores this flag in a BD not marked start
9231 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9232 DP(NETIF_MSG_TX_QUEUED,
9233 "TSO split data size is %d (%x:%x)\n",
9234 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9236 /* update tx_bd for marking the last BD flag */
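/* Editor's note: bnx2x_tx_split() re-uses the single DMA mapping of the
 * first buffer: the header BD keeps the original address with nbytes =
 * hlen, and the new data BD points hlen bytes further into the same
 * mapping with nbytes = old_len - hlen.  Sketch of that address/length
 * arithmetic on plain integers:
 */
#if 0
#include <stdint.h>
#include <assert.h>

struct fake_bd {
	uint64_t addr;
	uint16_t nbytes;
};

static void split_bd(const struct fake_bd *orig, uint16_t hlen,
		     struct fake_bd *hdr, struct fake_bd *data)
{
	assert(hlen <= orig->nbytes);
	hdr->addr = orig->addr;			/* same mapping */
	hdr->nbytes = hlen;
	data->addr = orig->addr + hlen;		/* no second pci_map needed */
	data->nbytes = orig->nbytes - hlen;
}
#endif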
9242 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9245 csum = (u16) ~csum_fold(csum_sub(csum,
9246 csum_partial(t_header - fix, fix, 0)));
9249 csum = (u16) ~csum_fold(csum_add(csum,
9250 csum_partial(t_header, -fix, 0)));
9252 return swab16(csum);
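/* Editor's note: bnx2x_csum_fix() is incremental Internet-checksum
 * arithmetic (RFC 1624): the bytes between where the stack started
 * summing and where the hardware expects the sum to start are removed
 * from (or added to) the one's-complement sum, then the result is folded
 * and byte-swapped.  Userspace sketch of removing an (even-length)
 * prefix from a partial sum:
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t csum_add32(uint32_t a, uint32_t b)
{
	uint32_t res = a + b;

	return res + (res < a);		/* end-around carry */
}

static uint32_t csum_partial16(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum = csum_add32(sum, ((uint32_t)p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)
		sum = csum_add32(sum, (uint32_t)p[0] << 8);
	return sum;
}

static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0xde, 0xad, 0xbe, 0xef };
	size_t fix = 4;		/* bytes to subtract from the front */

	uint16_t direct = fold16(csum_partial16(data + fix,
						sizeof(data) - fix));
	/* one's-complement subtraction: add the 32-bit complement */
	uint16_t derived = fold16(csum_add32(csum_partial16(data, sizeof(data)),
					     ~csum_partial16(data, fix)));

	printf("direct 0x%04x derived 0x%04x\n", direct, derived);
	return 0;
}
#endif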
9255 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9259 if (skb->ip_summed != CHECKSUM_PARTIAL)
9263 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9265 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9266 rc |= XMIT_CSUM_TCP;
9270 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9271 rc |= XMIT_CSUM_TCP;
9275 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9278 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9284 /* check if packet requires linearization (packet is too fragmented) */
9285 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9290 int first_bd_sz = 0;
9292 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9293 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9295 if (xmit_type & XMIT_GSO) {
9296 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9297 /* Check if LSO packet needs to be copied:
9298 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9299 int wnd_size = MAX_FETCH_BD - 3;
9300 /* Number of windows to check */
9301 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9306 /* Headers length */
9307 hlen = (int)(skb_transport_header(skb) - skb->data) +
9310 /* Amount of data (w/o headers) on the linear part of the SKB */
9311 first_bd_sz = skb_headlen(skb) - hlen;
9313 wnd_sum = first_bd_sz;
9315 /* Calculate the first sum - it's special */
9316 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9318 skb_shinfo(skb)->frags[frag_idx].size;
9320 /* If there was data on linear skb data - check it */
9321 if (first_bd_sz > 0) {
9322 if (unlikely(wnd_sum < lso_mss)) {
9327 wnd_sum -= first_bd_sz;
9330 /* Others are easier: run through the frag list and
9331 check all windows */
9332 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9334 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9336 if (unlikely(wnd_sum < lso_mss)) {
9341 skb_shinfo(skb)->frags[wnd_idx].size;
9345 /* in the non-LSO case, a too fragmented packet should always
9352 if (unlikely(to_copy))
9353 DP(NETIF_MSG_TX_QUEUED,
9354 "Linearization IS REQUIRED for %s packet. "
9355 "num_frags %d hlen %d first_bd_sz %d\n",
9356 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9357 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
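/* Editor's note: the check above slides a window of wnd_size BDs over
 * the frag sizes and flags the packet for linearization if any window
 * carries less than one MSS - the FW apparently needs a full MSS within
 * every MAX_FETCH_BD-sized group of BDs.  Distilled sketch of the
 * sliding-window sum (the driver version additionally special-cases an
 * empty linear part):
 */
#if 0
#include <stdbool.h>

/* sizes[0] is the linear part (first_bd_sz), sizes[1..n-1] the frags */
static bool needs_linearization(const int *sizes, int n, int wnd_size,
				int lso_mss)
{
	int wnd_sum = 0;
	int i;

	if (n < wnd_size)
		return false;	/* few enough BDs: always OK */

	for (i = 0; i < wnd_size; i++)
		wnd_sum += sizes[i];

	for (i = 0; ; i++) {
		if (wnd_sum < lso_mss)
			return true;	/* some window is short of one MSS */
		if (i + wnd_size >= n)
			break;
		wnd_sum += sizes[i + wnd_size] - sizes[i];	/* slide */
	}
	return false;
}
#endif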
9362 /* called with netif_tx_lock
9363 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9364 * netif_wake_queue()
9366 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9368 struct bnx2x *bp = netdev_priv(dev);
9369 struct bnx2x_fastpath *fp;
9370 struct sw_tx_bd *tx_buf;
9371 struct eth_tx_bd *tx_bd;
9372 struct eth_tx_parse_bd *pbd = NULL;
9373 u16 pkt_prod, bd_prod;
9376 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9377 int vlan_off = (bp->e1hov ? 4 : 0);
9381 #ifdef BNX2X_STOP_ON_ERROR
9382 if (unlikely(bp->panic))
9383 return NETDEV_TX_BUSY;
9386 fp_index = (smp_processor_id() % bp->num_queues);
9387 fp = &bp->fp[fp_index];
9389 if (unlikely(bnx2x_tx_avail(fp) <
9390 (skb_shinfo(skb)->nr_frags + 3))) {
9391 bp->eth_stats.driver_xoff++;
9392 netif_stop_queue(dev);
9393 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9394 return NETDEV_TX_BUSY;
9397 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9398 " gso type %x xmit_type %x\n",
9399 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9400 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9402 /* First, check if we need to linearize the skb
9403 (due to FW restrictions) */
9404 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9405 /* Statistics of linearization */
9407 if (skb_linearize(skb) != 0) {
9408 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9409 "silently dropping this SKB\n");
9410 dev_kfree_skb_any(skb);
9416 Please read carefully. First we use one BD which we mark as start,
9417 then for TSO or xsum we have a parsing info BD,
9418 and only then we have the rest of the TSO BDs.
9419 (don't forget to mark the last one as last,
9420 and to unmap only AFTER you write to the BD ...)
9421 And above all, all pbd sizes are in words - NOT DWORDS!
9424 pkt_prod = fp->tx_pkt_prod++;
9425 bd_prod = TX_BD(fp->tx_bd_prod);
9427 /* get a tx_buf and first BD */
9428 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9429 tx_bd = &fp->tx_desc_ring[bd_prod];
9431 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9432 tx_bd->general_data = (UNICAST_ADDRESS <<
9433 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9434 tx_bd->general_data |= 1; /* header nbd */
9436 /* remember the first BD of the packet */
9437 tx_buf->first_bd = fp->tx_bd_prod;
9440 DP(NETIF_MSG_TX_QUEUED,
9441 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9442 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9444 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9445 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9446 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9449 tx_bd->vlan = cpu_to_le16(pkt_prod);
9453 /* turn on parsing and get a BD */
9454 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9455 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9457 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9460 if (xmit_type & XMIT_CSUM) {
9461 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9463 /* for now NS flag is not used in Linux */
9464 pbd->global_data = (hlen |
9465 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9466 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9468 pbd->ip_hlen = (skb_transport_header(skb) -
9469 skb_network_header(skb)) / 2;
9471 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9473 pbd->total_hlen = cpu_to_le16(hlen);
9474 hlen = hlen*2 - vlan_off;
9476 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9478 if (xmit_type & XMIT_CSUM_V4)
9479 tx_bd->bd_flags.as_bitfield |=
9480 ETH_TX_BD_FLAGS_IP_CSUM;
9482 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9484 if (xmit_type & XMIT_CSUM_TCP) {
9485 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9488 s8 fix = SKB_CS_OFF(skb); /* signed! */
9490 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9491 pbd->cs_offset = fix / 2;
9493 DP(NETIF_MSG_TX_QUEUED,
9494 "hlen %d offset %d fix %d csum before fix %x\n",
9495 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9498 /* HW bug: fixup the CSUM */
9499 pbd->tcp_pseudo_csum =
9500 bnx2x_csum_fix(skb_transport_header(skb),
9503 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9504 pbd->tcp_pseudo_csum);
9508 mapping = pci_map_single(bp->pdev, skb->data,
9509 skb_headlen(skb), PCI_DMA_TODEVICE);
9511 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9512 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9513 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9514 tx_bd->nbd = cpu_to_le16(nbd);
9515 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9517 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9518 " nbytes %d flags %x vlan %x\n",
9519 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9520 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9521 le16_to_cpu(tx_bd->vlan));
9523 if (xmit_type & XMIT_GSO) {
9525 DP(NETIF_MSG_TX_QUEUED,
9526 "TSO packet len %d hlen %d total len %d tso size %d\n",
9527 skb->len, hlen, skb_headlen(skb),
9528 skb_shinfo(skb)->gso_size);
9530 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9532 if (unlikely(skb_headlen(skb) > hlen))
9533 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9536 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9537 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9538 pbd->tcp_flags = pbd_tcp_flags(skb);
9540 if (xmit_type & XMIT_GSO_V4) {
9541 pbd->ip_id = swab16(ip_hdr(skb)->id);
9542 pbd->tcp_pseudo_csum =
9543 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9545 0, IPPROTO_TCP, 0));
9548 pbd->tcp_pseudo_csum =
9549 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9550 &ipv6_hdr(skb)->daddr,
9551 0, IPPROTO_TCP, 0));
9553 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9556 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9557 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9559 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9560 tx_bd = &fp->tx_desc_ring[bd_prod];
9562 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9563 frag->size, PCI_DMA_TODEVICE);
9565 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9566 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9567 tx_bd->nbytes = cpu_to_le16(frag->size);
9568 tx_bd->vlan = cpu_to_le16(pkt_prod);
9569 tx_bd->bd_flags.as_bitfield = 0;
9571 DP(NETIF_MSG_TX_QUEUED,
9572 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9573 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9574 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9577 /* now at last mark the BD as the last BD */
9578 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9580 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9581 tx_bd, tx_bd->bd_flags.as_bitfield);
9583 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9585 /* now send a tx doorbell, counting the next BD
9586 * if the packet contains or ends with it
9588 if (TX_BD_POFF(bd_prod) < nbd)
9592 DP(NETIF_MSG_TX_QUEUED,
9593 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9594 " tcp_flags %x xsum %x seq %u hlen %u\n",
9595 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9596 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9597 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9599 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9601 fp->hw_tx_prods->bds_prod =
9602 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9603 mb(); /* FW restriction: must not reorder writing nbd and packets */
9604 fp->hw_tx_prods->packets_prod =
9605 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9606 DOORBELL(bp, FP_IDX(fp), 0);
9610 fp->tx_bd_prod += nbd;
9611 dev->trans_start = jiffies;
9613 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9614 netif_stop_queue(dev);
9615 bp->eth_stats.driver_xoff++;
9616 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9617 netif_wake_queue(dev);
9621 return NETDEV_TX_OK;
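/* Editor's note: the stop-queue path above uses the classic lock-free
 * producer idiom: stop the queue first, then re-read the ring - if the
 * completion path freed descriptors between the check and the stop, wake
 * the queue again rather than strand it.  Condensed sketch
 * (tx_avail_reread() is an illustrative stub for re-reading
 * bnx2x_tx_avail() after the stop):
 */
#if 0
static void tx_maybe_stop_sketch(struct net_device *dev,
				 int avail, int needed)
{
	if (avail < needed) {
		netif_stop_queue(dev);
		/* re-read after stopping: the TX completion path may have
		 * run concurrently and made room already */
		if (tx_avail_reread() >= needed)
			netif_wake_queue(dev);
	}
}
#endif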
9624 /* called with rtnl_lock */
9625 static int bnx2x_open(struct net_device *dev)
9627 struct bnx2x *bp = netdev_priv(dev);
9629 bnx2x_set_power_state(bp, PCI_D0);
9631 return bnx2x_nic_load(bp, LOAD_OPEN);
9634 /* called with rtnl_lock */
9635 static int bnx2x_close(struct net_device *dev)
9637 struct bnx2x *bp = netdev_priv(dev);
9639 /* Unload the driver, release IRQs */
9640 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9641 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9642 if (!CHIP_REV_IS_SLOW(bp))
9643 bnx2x_set_power_state(bp, PCI_D3hot);
9648 /* called with netif_tx_lock from set_multicast */
9649 static void bnx2x_set_rx_mode(struct net_device *dev)
9651 struct bnx2x *bp = netdev_priv(dev);
9652 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9653 int port = BP_PORT(bp);
9655 if (bp->state != BNX2X_STATE_OPEN) {
9656 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9660 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9662 if (dev->flags & IFF_PROMISC)
9663 rx_mode = BNX2X_RX_MODE_PROMISC;
9665 else if ((dev->flags & IFF_ALLMULTI) ||
9666 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9667 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9669 else { /* some multicasts */
9670 if (CHIP_IS_E1(bp)) {
9672 struct dev_mc_list *mclist;
9673 struct mac_configuration_cmd *config =
9674 bnx2x_sp(bp, mcast_config);
9676 for (i = 0, mclist = dev->mc_list;
9677 mclist && (i < dev->mc_count);
9678 i++, mclist = mclist->next) {
9680 config->config_table[i].
9681 cam_entry.msb_mac_addr =
9682 swab16(*(u16 *)&mclist->dmi_addr[0]);
9683 config->config_table[i].
9684 cam_entry.middle_mac_addr =
9685 swab16(*(u16 *)&mclist->dmi_addr[2]);
9686 config->config_table[i].
9687 cam_entry.lsb_mac_addr =
9688 swab16(*(u16 *)&mclist->dmi_addr[4]);
9689 config->config_table[i].cam_entry.flags =
9691 config->config_table[i].
9692 target_table_entry.flags = 0;
9693 config->config_table[i].
9694 target_table_entry.client_id = 0;
9695 config->config_table[i].
9696 target_table_entry.vlan_id = 0;
9699 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9700 config->config_table[i].
9701 cam_entry.msb_mac_addr,
9702 config->config_table[i].
9703 cam_entry.middle_mac_addr,
9704 config->config_table[i].
9705 cam_entry.lsb_mac_addr);
9707 old = config->hdr.length_6b;
9709 for (; i < old; i++) {
9710 if (CAM_IS_INVALID(config->
9712 i--; /* already invalidated */
9716 CAM_INVALIDATE(config->
9721 if (CHIP_REV_IS_SLOW(bp))
9722 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9724 offset = BNX2X_MAX_MULTICAST*(1 + port);
9726 config->hdr.length_6b = i;
9727 config->hdr.offset = offset;
9728 config->hdr.client_id = BP_CL_ID(bp);
9729 config->hdr.reserved1 = 0;
9731 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9732 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9733 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9736 /* Accept one or more multicasts */
9737 struct dev_mc_list *mclist;
9738 u32 mc_filter[MC_HASH_SIZE];
9739 u32 crc, bit, regidx;
9742 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9744 for (i = 0, mclist = dev->mc_list;
9745 mclist && (i < dev->mc_count);
9746 i++, mclist = mclist->next) {
9748 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9749 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9750 mclist->dmi_addr[0], mclist->dmi_addr[1],
9751 mclist->dmi_addr[2], mclist->dmi_addr[3],
9752 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9754 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9755 bit = (crc >> 24) & 0xff;
9758 mc_filter[regidx] |= (1 << bit);
9761 for (i = 0; i < MC_HASH_SIZE; i++)
9762 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9767 bp->rx_mode = rx_mode;
9768 bnx2x_set_storm_rx_mode(bp);
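/* Editor's note: on non-E1 chips the multicast filter above is a 256-bit
 * hash table: byte 3 of the CRC32c of the MAC address selects one of 256
 * bits, split across MC_HASH_SIZE (assumed to be 8) 32-bit registers.
 * Presumably the elided lines derive regidx and mask the bit to 0..31
 * before the mc_filter update.  Self-contained sketch of the bit
 * selection:
 */
#if 0
#include <stdint.h>

#define MC_HASH_REGS	8	/* assumed: 8 x 32-bit regs = 256 bits */

static void mc_hash_set(uint32_t mc_filter[MC_HASH_REGS], uint32_t crc)
{
	unsigned int bit = (crc >> 24) & 0xff;	/* byte 3 of CRC32c(MAC) */
	unsigned int regidx = bit >> 5;		/* which 32-bit register */

	mc_filter[regidx] |= 1u << (bit & 0x1f);	/* which bit in it */
}
#endif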
9771 /* called with rtnl_lock */
9772 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9774 struct sockaddr *addr = p;
9775 struct bnx2x *bp = netdev_priv(dev);
9777 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9780 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9781 if (netif_running(dev)) {
9783 bnx2x_set_mac_addr_e1(bp);
9785 bnx2x_set_mac_addr_e1h(bp);
9791 /* called with rtnl_lock */
9792 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9794 struct mii_ioctl_data *data = if_mii(ifr);
9795 struct bnx2x *bp = netdev_priv(dev);
9800 data->phy_id = bp->port.phy_addr;
9807 if (!netif_running(dev))
9810 mutex_lock(&bp->port.phy_mutex);
9811 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9812 DEFAULT_PHY_DEV_ADDR,
9813 (data->reg_num & 0x1f), &mii_regval);
9814 data->val_out = mii_regval;
9815 mutex_unlock(&bp->port.phy_mutex);
9820 if (!capable(CAP_NET_ADMIN))
9823 if (!netif_running(dev))
9826 mutex_lock(&bp->port.phy_mutex);
9827 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9828 DEFAULT_PHY_DEV_ADDR,
9829 (data->reg_num & 0x1f), data->val_in);
9830 mutex_unlock(&bp->port.phy_mutex);
9841 /* called with rtnl_lock */
9842 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9844 struct bnx2x *bp = netdev_priv(dev);
9847 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9848 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9851 /* This does not race with packet allocation
9852 * because the actual alloc size is
9853 * only updated as part of load
9857 if (netif_running(dev)) {
9858 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9859 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9865 static void bnx2x_tx_timeout(struct net_device *dev)
9867 struct bnx2x *bp = netdev_priv(dev);
9869 #ifdef BNX2X_STOP_ON_ERROR
9873 /* This allows the netif to be shutdown gracefully before resetting */
9874 schedule_work(&bp->reset_task);
9878 /* called with rtnl_lock */
9879 static void bnx2x_vlan_rx_register(struct net_device *dev,
9880 struct vlan_group *vlgrp)
9882 struct bnx2x *bp = netdev_priv(dev);
9885 if (netif_running(dev))
9886 bnx2x_set_client_config(bp);
9891 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9892 static void poll_bnx2x(struct net_device *dev)
9894 struct bnx2x *bp = netdev_priv(dev);
9896 disable_irq(bp->pdev->irq);
9897 bnx2x_interrupt(bp->pdev->irq, dev);
9898 enable_irq(bp->pdev->irq);
9902 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9903 struct net_device *dev)
9908 SET_NETDEV_DEV(dev, &pdev->dev);
9909 bp = netdev_priv(dev);
9914 bp->func = PCI_FUNC(pdev->devfn);
9916 rc = pci_enable_device(pdev);
9918 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9922 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9923 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9926 goto err_out_disable;
9929 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9930 printk(KERN_ERR PFX "Cannot find second PCI device"
9931 " base address, aborting\n");
9933 goto err_out_disable;
9936 if (atomic_read(&pdev->enable_cnt) == 1) {
9937 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9939 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9941 goto err_out_disable;
9944 pci_set_master(pdev);
9945 pci_save_state(pdev);
9948 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9949 if (bp->pm_cap == 0) {
9950 printk(KERN_ERR PFX "Cannot find power management"
9951 " capability, aborting\n");
9953 goto err_out_release;
9956 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9957 if (bp->pcie_cap == 0) {
9958 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9961 goto err_out_release;
9964 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9965 bp->flags |= USING_DAC_FLAG;
9966 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9967 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9968 " failed, aborting\n");
9970 goto err_out_release;
9973 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9974 printk(KERN_ERR PFX "System does not support DMA,"
9977 goto err_out_release;
9980 dev->mem_start = pci_resource_start(pdev, 0);
9981 dev->base_addr = dev->mem_start;
9982 dev->mem_end = pci_resource_end(pdev, 0);
9984 dev->irq = pdev->irq;
9986 bp->regview = ioremap_nocache(dev->base_addr,
9987 pci_resource_len(pdev, 0));
9989 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9991 goto err_out_release;
9994 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9995 min_t(u64, BNX2X_DB_SIZE,
9996 pci_resource_len(pdev, 2)));
9997 if (!bp->doorbells) {
9998 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10000 goto err_out_unmap;
10003 bnx2x_set_power_state(bp, PCI_D0);
10005 /* clean indirect addresses */
10006 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10007 PCICFG_VENDOR_ID_OFFSET);
10008 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10009 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10010 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10011 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10013 dev->hard_start_xmit = bnx2x_start_xmit;
10014 dev->watchdog_timeo = TX_TIMEOUT;
10016 dev->ethtool_ops = &bnx2x_ethtool_ops;
10017 dev->open = bnx2x_open;
10018 dev->stop = bnx2x_close;
10019 dev->set_multicast_list = bnx2x_set_rx_mode;
10020 dev->set_mac_address = bnx2x_change_mac_addr;
10021 dev->do_ioctl = bnx2x_ioctl;
10022 dev->change_mtu = bnx2x_change_mtu;
10023 dev->tx_timeout = bnx2x_tx_timeout;
10025 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10027 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10028 dev->poll_controller = poll_bnx2x;
10030 dev->features |= NETIF_F_SG;
10031 dev->features |= NETIF_F_HW_CSUM;
10032 if (bp->flags & USING_DAC_FLAG)
10033 dev->features |= NETIF_F_HIGHDMA;
10035 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10037 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10038 dev->features |= NETIF_F_TSO6;
10044 iounmap(bp->regview);
10045 bp->regview = NULL;
10047 if (bp->doorbells) {
10048 iounmap(bp->doorbells);
10049 bp->doorbells = NULL;
10053 if (atomic_read(&pdev->enable_cnt) == 1)
10054 pci_release_regions(pdev);
10057 pci_disable_device(pdev);
10058 pci_set_drvdata(pdev, NULL);
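/* Editor's note: the DMA setup in bnx2x_init_dev() is the standard
 * two-step probe: try a 64-bit mask (and, if it sticks, a matching
 * consistent mask, flagging the device for high DMA), otherwise fall
 * back to 32 bits and fail the probe only if even that is refused.
 * Shape of the idiom, as a hedged sketch:
 */
#if 0
static int dma_mask_probe_sketch(struct pci_dev *pdev, int *using_dac)
{
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		*using_dac = 1;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0)
			return -EIO;	/* 64-bit streaming DMA but no
					 * 64-bit coherent allocations */
	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		return -EIO;		/* no usable DMA addressing at all */
	} else {
		*using_dac = 0;
	}
	return 0;
}
#endif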
10064 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10066 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10068 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10072 /* return value of 1=2.5GHz 2=5GHz */
10073 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10075 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10077 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10081 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10082 const struct pci_device_id *ent)
10084 static int version_printed;
10085 struct net_device *dev = NULL;
10088 DECLARE_MAC_BUF(mac);
10090 if (version_printed++ == 0)
10091 printk(KERN_INFO "%s", version);
10093 /* dev zeroed in init_etherdev */
10094 dev = alloc_etherdev(sizeof(*bp));
10096 printk(KERN_ERR PFX "Cannot allocate net device\n");
10100 netif_carrier_off(dev);
10102 bp = netdev_priv(dev);
10103 bp->msglevel = debug;
10105 rc = bnx2x_init_dev(pdev, dev);
10111 rc = register_netdev(dev);
10113 dev_err(&pdev->dev, "Cannot register net device\n");
10114 goto init_one_exit;
10117 pci_set_drvdata(pdev, dev);
10119 rc = bnx2x_init_bp(bp);
10121 unregister_netdev(dev);
10122 goto init_one_exit;
10125 bp->common.name = board_info[ent->driver_data].name;
10126 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10127 " IRQ %d, ", dev->name, bp->common.name,
10128 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10129 bnx2x_get_pcie_width(bp),
10130 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10131 dev->base_addr, bp->pdev->irq);
10132 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10137 iounmap(bp->regview);
10140 iounmap(bp->doorbells);
10144 if (atomic_read(&pdev->enable_cnt) == 1)
10145 pci_release_regions(pdev);
10147 pci_disable_device(pdev);
10148 pci_set_drvdata(pdev, NULL);
10153 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10155 struct net_device *dev = pci_get_drvdata(pdev);
10159 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10162 bp = netdev_priv(dev);
10164 unregister_netdev(dev);
10167 iounmap(bp->regview);
10170 iounmap(bp->doorbells);
10174 if (atomic_read(&pdev->enable_cnt) == 1)
10175 pci_release_regions(pdev);
10177 pci_disable_device(pdev);
10178 pci_set_drvdata(pdev, NULL);
10181 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10183 struct net_device *dev = pci_get_drvdata(pdev);
10187 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10190 bp = netdev_priv(dev);
10194 pci_save_state(pdev);
10196 if (!netif_running(dev)) {
10201 netif_device_detach(dev);
10203 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10205 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10212 static int bnx2x_resume(struct pci_dev *pdev)
10214 struct net_device *dev = pci_get_drvdata(pdev);
10219 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10222 bp = netdev_priv(dev);
10226 pci_restore_state(pdev);
10228 if (!netif_running(dev)) {
10233 bnx2x_set_power_state(bp, PCI_D0);
10234 netif_device_attach(dev);
10236 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10244 * bnx2x_io_error_detected - called when PCI error is detected
10245 * @pdev: Pointer to PCI device
10246 * @state: The current pci connection state
10248 * This function is called after a PCI bus error affecting
10249 * this device has been detected.
10251 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10252 pci_channel_state_t state)
10254 struct net_device *dev = pci_get_drvdata(pdev);
10255 struct bnx2x *bp = netdev_priv(dev);
10259 netif_device_detach(dev);
10261 if (netif_running(dev))
10262 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10264 pci_disable_device(pdev);
10268 /* Request a slot reset */
10269 return PCI_ERS_RESULT_NEED_RESET;
10273 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10274 * @pdev: Pointer to PCI device
10276 * Restart the card from scratch, as if from a cold boot.
10278 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10280 struct net_device *dev = pci_get_drvdata(pdev);
10281 struct bnx2x *bp = netdev_priv(dev);
10285 if (pci_enable_device(pdev)) {
10286 dev_err(&pdev->dev,
10287 "Cannot re-enable PCI device after reset\n");
10289 return PCI_ERS_RESULT_DISCONNECT;
10292 pci_set_master(pdev);
10293 pci_restore_state(pdev);
10295 if (netif_running(dev))
10296 bnx2x_set_power_state(bp, PCI_D0);
10300 return PCI_ERS_RESULT_RECOVERED;
10304 * bnx2x_io_resume - called when traffic can start flowing again
10305 * @pdev: Pointer to PCI device
10307 * This callback is called when the error recovery driver tells us that
10308 * it's OK to resume normal operation.
10310 static void bnx2x_io_resume(struct pci_dev *pdev)
10312 struct net_device *dev = pci_get_drvdata(pdev);
10313 struct bnx2x *bp = netdev_priv(dev);
10317 if (netif_running(dev))
10318 bnx2x_nic_load(bp, LOAD_OPEN);
10320 netif_device_attach(dev);
10325 static struct pci_error_handlers bnx2x_err_handler = {
10326 .error_detected = bnx2x_io_error_detected,
10327 .slot_reset = bnx2x_io_slot_reset,
10328 .resume = bnx2x_io_resume,
10331 static struct pci_driver bnx2x_pci_driver = {
10332 .name = DRV_MODULE_NAME,
10333 .id_table = bnx2x_pci_tbl,
10334 .probe = bnx2x_init_one,
10335 .remove = __devexit_p(bnx2x_remove_one),
10336 .suspend = bnx2x_suspend,
10337 .resume = bnx2x_resume,
10338 .err_handler = &bnx2x_err_handler,
10341 static int __init bnx2x_init(void)
10343 return pci_register_driver(&bnx2x_pci_driver);
10346 static void __exit bnx2x_cleanup(void)
10348 pci_unregister_driver(&bnx2x_pci_driver);
10351 module_init(bnx2x_init);
10352 module_exit(bnx2x_cleanup);