/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
61 #include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {

/* indexed by board_type, above */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
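
/*
 * Illustrative note, not from the original source: both helpers above use
 * the same indirect-access protocol - latch the GRC byte offset into
 * PCICFG_GRC_ADDRESS, move the data through PCICFG_GRC_DATA, then park the
 * address register back at PCICFG_VENDOR_ID_OFFSET so a later, unrelated
 * config cycle cannot touch a live GRC register.  A hypothetical caller:
 *
 *	bnx2x_reg_wr_ind(bp, offset, val);
 *	val = bnx2x_reg_rd_ind(bp, offset);
 *
 * where "offset" stands for any byte offset into the GRC space.
 */
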
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
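
/*
 * Illustrative arithmetic (assumed sizes, not taken from the headers): if
 * sizeof(struct dmae_command) were 56 bytes (14 dwords), command slot
 * idx = 3 would start at DMAE_REG_CMD_MEM + 3*56 and the loop above would
 * copy dwords 0..13 of the command there before writing 1 to
 * dmae_reg_go_c[3] (DMAE_REG_GO_C3) to start the engine on that channel.
 */
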
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
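
/*
 * Illustrative note (hypothetical numbers): the DMAE destination field is
 * dword-addressed, which is why dst_addr is shifted right by 2 above; a
 * GRC byte offset of 0x3358 becomes dst_addr_lo = 0x3358 >> 2 = 0xcd6.
 * Completion is detected by polling wb_comp until the engine itself
 * DMA-writes DMAE_COMP_VAL there.
 */
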
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
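
/*
 * Illustrative sketch with made-up values: "wide bus" registers are 64
 * bits and move as two dwords over DMAE, e.g. if the read returned
 * wb_data[0] = 0x00000001 and wb_data[1] = 0x23456789, then
 * HILO_U64(wb_data[0], wb_data[1]) yields 0x0000000123456789ULL.
 */
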
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		}
	}

	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		}
	}

	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		}
	}

	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));

		/* enable nig attention */

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
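
/*
 * Worked example with illustrative values (the shift constants live in the
 * HSI headers): acking sb_id 5 from the CSTORM with index 0x1234, op
 * IGU_INT_ENABLE and update 1 sets igu_ack.status_block_index = 0x1234 and
 * ORs the four shifted fields into sb_id_and_flags; the whole struct is
 * then pushed to the IGU as the single dword write just above.
 */
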
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((fp->rx_comp_cons != rx_cons_sb) ||
	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		return 1;

	return 0;
}
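
/*
 * Illustrative note (assumes the usual one-reserved-entry page layout):
 * the last index of each completion-queue page is a "next page" pointer
 * that never carries a completion, so a status-block value that lands on
 * MAX_RCQ_DESC_CNT is stepped over it - e.g. with 128 slots per page and
 * one reserved, a reported 127 is treated as 128 before the comparison.
 */
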
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	BNX2X_ERR("read %x from IGU\n", result);
	REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
#endif

	return result;
}

/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	 * since they have no mapping
	 */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		nbd--;
	}

	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	 * It will be used as a threshold
	 */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
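
/*
 * Worked example with made-up numbers: for prod = 100, cons = 40 and a
 * hypothetical NUM_TX_RINGS of 16, used = SUB_S16(100, 40) + 16 = 76, so
 * the "next-page" descriptor of every page is counted as permanently
 * consumed and tx_ring_size - 76 BDs are reported free.  SUB_S16() keeps
 * the subtraction correct across the u16 wrap of prod/cons.
 */
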
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		} */

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
	      BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
		   cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
		   cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) "
			  "fp->state is %x\n", command, fp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	 * these are the indices that correspond to the "next" element,
	 * hence will never be indicated and should be removed from
	 * the calculations.
	 */
	bnx2x_clear_sge_mask_next_elems(fp);
}
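
/*
 * Illustrative example (assuming 64 SGEs tracked per u64 mask element):
 * after the memset above every bit is 1 (= free), and
 * bnx2x_clear_sge_mask_next_elems() clears only the two bits of each
 * page's "next" element.  bnx2x_update_sge_prod() can then advance the
 * producer one whole element at a time simply by checking
 * fp->sge_mask[i] == 0, which is the "faster to compare to 0" trick the
 * comment above refers to.
 */
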
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that "next" element will consume 2 indices)
		 */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;

	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	 * that's why it's ok here
	 */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad, queue;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		/* this is an rx packet */
		rx_buf = &fp->rx_buf_ring[bd_cons];
		skb = rx_buf->skb;
		len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
		pad = cqe->fast_path_cqe.placement_offset;

		/* If CQE is marked both TPA_START and TPA_END
		 * it is a non-TPA CQE
		 */
		if ((!fp->disable_tpa) &&
		    (TPA_TYPE(cqe_fp_flags) !=
		     (TPA_TYPE_START | TPA_TYPE_END))) {
			queue = cqe->fast_path_cqe.queue_index;

			if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue, skb,
						bd_cons, bd_prod);
				goto next_rx;
			}

			if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_stop on queue %d\n",
				   queue);

				if (!BNX2X_RX_SUM_FIX(cqe))
1478 BNX2X_ERR("STOP on none TCP "
1481 /* This is a size of the linear data
				len = le16_to_cpu(cqe->fast_path_cqe.
						  len_on_bd);
				bnx2x_tpa_stop(bp, fp, queue, pad,
					       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
				if (bp->panic)
					return rx_pkt;
#endif

				bnx2x_update_sge_prod(fp,
						      &cqe->fast_path_cqe);
				goto next_cqe;
			}
		}

		pci_dma_sync_single_for_device(bp->pdev,
					       pci_unmap_addr(rx_buf, mapping),
					       pad + RX_COPY_THRESH,
					       PCI_DMA_FROMDEVICE);
		prefetch(((char *)(skb)) + 128);

		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bp->eth_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev,
						   len + pad);
			if (new_skb == NULL) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped "
				   "because of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}

			skb_copy_from_linear_data_offset(skb, pad,
					    new_skb->data + pad, len);
			skb_reserve(new_skb, pad);
			skb_put(new_skb, len);

			bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			skb = new_skb;

		} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);
			skb_reserve(skb, pad);
			skb_put(skb, len);

		} else {
			DP(NETIF_MSG_RX_ERR,
			   "ERROR packet dropped because "
			   "of alloc failure\n");
			bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
			bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		skb->ip_summed = CHECKSUM_NONE;
		if (likely(BNX2X_RX_CSUM_OK(cqe)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			bp->eth_stats.hw_csum_err++;

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	}

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */

static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}
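
/*
 * Illustrative usage sketch (return conventions assumed, not quoted from
 * the original): the DRIVER_CONTROL register pair gives a set/clear lock
 * per resource bit - writing the bit to the "+ 4" register requests the
 * lock and reading it back confirms ownership:
 *
 *	if (!bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... touch the shared register ...
 *		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */
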
/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
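
/*
 * Worked example (illustrative): for gpio_num = 2 on the swapped port,
 * gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT and gpio_mask has that
 * single bit set.  OUTPUT_LOW then clears the pin's FLOAT bit (driving it
 * instead of leaving it high-Z) and sets its CLR bit, all folded into one
 * MISC_REG_GPIO write performed under the GPIO hardware lock.
 */
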
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_phy_hw_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

	} else
		BNX2X_ERR("Bootcode is missing - not initializing link\n");

	return rc;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_phy_hw_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_phy_hw_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
 * It's needed for further normalizing of the min_rates.
 *
 * Returns:
 *   sum of vn_min_rates
 *     or
 *   0 - if all the min_rates are 0.
 *     In the latter case the fairness algorithm should be deactivated.
 *     If not all min_rates are zero then those that are zeroes will
 *     be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	u32 wsum = 0;
	int all_zero = 1;
	int i, port = BP_PORT(bp);

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		 * 1.25 coefficient is for the threshold to be a little bigger
		 * than the real time, to compensate for timer inaccuracy
		 */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		 * the timer anymore
		 */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		 * We don't want the credits to exceed
		 * T_FAIR*FAIR_MEM (the algorithm resolution).
		 */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;
	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
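
/*
 * Worked example with illustrative numbers: at port_rate = 10000 Mbps,
 * r_param = 10000/8 = 1250 bytes/usec; the "for 10G it is 1000usec" note
 * above means t_fair = T_FAIR_COEF / 10000 comes out at 1000 usec, so
 * upper_bound = 1250 * 1000 * FAIR_MEM bytes.  Every *_timeout value is
 * divided by 4 because one SDM tick is 4 usec, e.g. a 100 usec period
 * becomes 25 ticks as the comment above spells out.
 */
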
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		 * if the current min rate is zero - set it to 1.
		 * This is a requirement of the algorithm.
		 */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
2136 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2137 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2139 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2140 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2142 /* global vn counter - maximal Mbps for this vn */
2143 m_rs_vn.vn_counter.rate = vn_max_rate;
2145 /* quota - number of bytes transmitted in this period */
2146 m_rs_vn.vn_counter.quota =
2147 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2149 #ifdef BNX2X_PER_PROT_QOS
2150 /* per protocol counter */
2151 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2152 /* maximal Mbps for this protocol */
2153 m_rs_vn.protocol_counters[protocol].rate =
2154 protocol_max_rate[protocol];
2155 /* the quota in each timer period -
2156 number of bytes transmitted in this period */
2157 m_rs_vn.protocol_counters[protocol].quota =
2158 (u32)(rs_periodic_timeout_usec *
2160 protocol_counters[protocol].rate/8));

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		 * number of bytes in T_FAIR (the vns share the port rate).
		 * wsum should not be larger than 10000, thus
		 * T_FAIR_COEF / (8 * wsum) will always be greater than zero
		 */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		 * NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT
		 */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				 * fairness algorithm - number of bytes in
				 * T_FAIR (the protocol share the vn rate)
				 */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					      protocol_min_rate /
					      protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
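
/*
 * Worked example with illustrative numbers: if wsum = 10000 and a vn is
 * configured with vn_min_rate = 2500, the code above gives
 * vn_credit_delta = 2500 * (T_FAIR_COEF / 80000) bytes per fairness
 * period, unless that is smaller than 2 * fair_threshold, in which case
 * the credit is clamped up to the algorithm's resolution.
 */
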
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn, func;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_phy_hw_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | BP_PORT(bp));

		/* Set the attention towards other drivers */
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
				       bp->link_vars.line_speed,
				       &m_cmng_port);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port,
					     wsum, bp->link_vars.line_speed,
					     &m_cmng_port);
	}
}
2274 static void bnx2x__link_status_update(struct bnx2x *bp)
2276 if (bp->state != BNX2X_STATE_OPEN)
2279 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2281 if (bp->link_vars.link_up)
2282 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2284 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286 /* indicate link status */
2287 bnx2x_link_report(bp);
2290 static void bnx2x_pmf_update(struct bnx2x *bp)
2292 int port = BP_PORT(bp);
2296 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2298 /* enable nig attention */
2299 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2303 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2311 * General service functions
2314 /* the slow path queue is odd since completions arrive on the fastpath ring */
2315 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316 u32 data_hi, u32 data_lo, int common)
2318 int func = BP_FUNC(bp);
2320 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2322 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2326 #ifdef BNX2X_STOP_ON_ERROR
2327 if (unlikely(bp->panic))
2331 spin_lock_bh(&bp->spq_lock);
2333 if (!bp->spq_left) {
2334 BNX2X_ERR("BUG! SPQ ring full!\n");
2335 spin_unlock_bh(&bp->spq_lock);
2340 /* CID needs the port number to be encoded in it */
2341 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2344 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2346 bp->spq_prod_bd->hdr.type |=
2347 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2349 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");
2364 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2367 spin_unlock_bh(&bp->spq_lock);
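/* The producer bookkeeping above is the standard ring idiom: wrap the
 * BD pointer and index at the last element, otherwise advance both,
 * then publish the new producer index to the chip.  A minimal sketch
 * over a generic ring (hypothetical names, for illustration only):
 *
 *	if (ring->prod_bd == ring->last_bd) {
 *		ring->prod_bd = ring->base;	// wrap around
 *		ring->prod_idx = 0;
 *	} else {
 *		ring->prod_bd++;
 *		ring->prod_idx++;
 *	}
 *	writel(ring->prod_idx, ring->doorbell);	// publish to HW
 */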
2371 /* acquire split MCP access lock register */
2372 static int bnx2x_lock_alr(struct bnx2x *bp)
2379 for (j = 0; j < i*10; j++) {
2381 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383 if (val & (1L << 31))
2388 if (!(val & (1L << 31))) {
2389 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2396 /* Release split MCP access lock register */
2397 static void bnx2x_unlock_alr(struct bnx2x *bp)
2401 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
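/* The ALR is a hardware request/grant bit: write the request and read
 * it back; ownership is confirmed when bit 31 reads back set, and
 * writing the register with the bit clear releases it.  The acquire
 * loop is the usual bounded-poll idiom, roughly (sketch, not verbatim):
 *
 *	for (j = 0; j < retries; j++) {
 *		REG_WR(bp, lock_reg, 1L << 31);		// request
 *		val = REG_RD(bp, lock_reg);
 *		if (val & (1L << 31))
 *			return 0;			// acquired
 *		msleep(5);
 *	}
 *	return -EAGAIN;					// timed out
 */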
2404 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2406 struct host_def_status_block *def_sb = bp->def_status_blk;
2409 barrier(); /* status block is written to by the chip */
2411 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2412 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2415 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2416 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2419 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2420 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2423 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2424 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2427 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2428 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
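/* Each compare caches the chip's new index and flags the change in the
 * returned mask, so bnx2x_sp_task() can tell which storm indices moved.
 * Conceptually (sketch; the flag values are illustrative):
 *
 *	rc = 0;
 *	if (cached_idx != sb_idx) {
 *		cached_idx = sb_idx;
 *		rc |= 1;		// this storm has new work
 *	}
 *	...
 *	return rc;
 */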
2435 * slow path service functions
2438 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2440 int port = BP_PORT(bp);
2441 int func = BP_FUNC(bp);
2442 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2443 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2444 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2445 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2446 NIG_REG_MASK_INTERRUPT_PORT0;
2448 if (~bp->aeu_mask & (asserted & 0xff))
2449 BNX2X_ERR("IGU ERROR\n");
2450 if (bp->attn_state & asserted)
2451 BNX2X_ERR("IGU ERROR\n");
2453 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2454 bp->aeu_mask, asserted);
2455 bp->aeu_mask &= ~(asserted & 0xff);
2456 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2458 REG_WR(bp, aeu_addr, bp->aeu_mask);
2460 bp->attn_state |= asserted;
2462 if (asserted & ATTN_HARD_WIRED_MASK) {
2463 if (asserted & ATTN_NIG_FOR_FUNC) {
2465 /* save nig interrupt mask */
2466 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2467 REG_WR(bp, nig_int_mask_addr, 0);
2469 bnx2x_link_attn(bp);
2471 /* handle unicore attn? */
2473 if (asserted & ATTN_SW_TIMER_4_FUNC)
2474 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2476 if (asserted & GPIO_2_FUNC)
2477 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2479 if (asserted & GPIO_3_FUNC)
2480 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2482 if (asserted & GPIO_4_FUNC)
2483 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2486 if (asserted & ATTN_GENERAL_ATTN_1) {
2487 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2488 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2490 if (asserted & ATTN_GENERAL_ATTN_2) {
2491 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2494 if (asserted & ATTN_GENERAL_ATTN_3) {
2495 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2499 if (asserted & ATTN_GENERAL_ATTN_4) {
2500 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2501 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2503 if (asserted & ATTN_GENERAL_ATTN_5) {
2504 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2505 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2507 if (asserted & ATTN_GENERAL_ATTN_6) {
2508 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2513 } /* if hardwired */
2515 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2516 asserted, BAR_IGU_INTMEM + igu_addr);
2517 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2519 /* now set back the mask */
2520 if (asserted & ATTN_NIG_FOR_FUNC)
2521 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2524 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2526 int port = BP_PORT(bp);
2530 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2531 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2533 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2535 val = REG_RD(bp, reg_offset);
2536 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2537 REG_WR(bp, reg_offset, val);
2539 BNX2X_ERR("SPIO5 hw attention\n");
2541 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2542 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2543 /* Fan failure attention */
2545 /* The PHY reset is controlled by GPIO 1 */
2546 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2547 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2548 /* Low power mode is controlled by GPIO 2 */
2549 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2550 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2551 /* mark the failure */
2552 bp->link_params.ext_phy_config &=
2553 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2554 bp->link_params.ext_phy_config |=
2555 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2557 dev_info.port_hw_config[port].
2558 external_phy_config,
2559 bp->link_params.ext_phy_config);
2560 /* log the failure */
2561 printk(KERN_ERR PFX "Fan Failure on Network"
2562 " Controller %s has caused the driver to"
2563 " shutdown the card to prevent permanent"
2564 " damage. Please contact Dell Support for"
2565 " assistance\n", bp->dev->name);
2573 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2575 val = REG_RD(bp, reg_offset);
2576 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2577 REG_WR(bp, reg_offset, val);
2579 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2580 (attn & HW_INTERRUT_ASSERT_SET_0));
2585 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2589 if (attn & BNX2X_DOORQ_ASSERT) {
2591 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2592 BNX2X_ERR("DB hw attention 0x%x\n", val);
2593 /* DORQ discard attention */
2595 BNX2X_ERR("FATAL error from DORQ\n");
2598 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2600 int port = BP_PORT(bp);
2603 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2604 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2606 val = REG_RD(bp, reg_offset);
2607 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2608 REG_WR(bp, reg_offset, val);
2610 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2611 (attn & HW_INTERRUT_ASSERT_SET_1));
2616 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2620 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2622 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2623 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2624 /* CFC error attention */
2626 BNX2X_ERR("FATAL error from CFC\n");
2629 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2631 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2632 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2633 /* RQ_USDMDP_FIFO_OVERFLOW */
2635 BNX2X_ERR("FATAL error from PXP\n");
2638 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2640 int port = BP_PORT(bp);
2643 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2644 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2646 val = REG_RD(bp, reg_offset);
2647 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2648 REG_WR(bp, reg_offset, val);
2650 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2651 (attn & HW_INTERRUT_ASSERT_SET_2));
2656 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2660 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2662 if (attn & BNX2X_PMF_LINK_ASSERT) {
2663 int func = BP_FUNC(bp);
2665 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2666 bnx2x__link_status_update(bp);
2667 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2669 bnx2x_pmf_update(bp);
2671 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2673 BNX2X_ERR("MC assert!\n");
2674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2676 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2680 } else if (attn & BNX2X_MCP_ASSERT) {
2682 BNX2X_ERR("MCP assert!\n");
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2687 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2690 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2691 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2692 if (attn & BNX2X_GRC_TIMEOUT) {
2693 val = CHIP_IS_E1H(bp) ?
2694 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2695 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2697 if (attn & BNX2X_GRC_RSV) {
2698 val = CHIP_IS_E1H(bp) ?
2699 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2700 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2702 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2706 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2708 struct attn_route attn;
2709 struct attn_route group_mask;
2710 int port = BP_PORT(bp);
2715 /* need to take HW lock because MCP or the other port might also
2716 try to handle this event */
2719 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2720 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2721 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2722 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2723 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2724 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2726 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2727 if (deasserted & (1 << index)) {
2728 group_mask = bp->attn_group[index];
2730 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2731 index, group_mask.sig[0], group_mask.sig[1],
2732 group_mask.sig[2], group_mask.sig[3]);
2734 bnx2x_attn_int_deasserted3(bp,
2735 attn.sig[3] & group_mask.sig[3]);
2736 bnx2x_attn_int_deasserted1(bp,
2737 attn.sig[1] & group_mask.sig[1]);
2738 bnx2x_attn_int_deasserted2(bp,
2739 attn.sig[2] & group_mask.sig[2]);
2740 bnx2x_attn_int_deasserted0(bp,
2741 attn.sig[0] & group_mask.sig[0]);
2743 if ((attn.sig[0] & group_mask.sig[0] &
2744 HW_PRTY_ASSERT_SET_0) ||
2745 (attn.sig[1] & group_mask.sig[1] &
2746 HW_PRTY_ASSERT_SET_1) ||
2747 (attn.sig[2] & group_mask.sig[2] &
2748 HW_PRTY_ASSERT_SET_2))
2749 BNX2X_ERR("FATAL HW block parity attention\n");
2753 bnx2x_unlock_alr(bp);
2755 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2758 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2759 val, BAR_IGU_INTMEM + reg_addr); */
2760 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2762 if (bp->aeu_mask & (deasserted & 0xff))
2763 BNX2X_ERR("IGU BUG!\n");
2764 if (~bp->attn_state & deasserted)
2765 BNX2X_ERR("IGU BUG!\n");
2767 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2768 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2770 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2771 bp->aeu_mask |= (deasserted & 0xff);
2773 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2774 REG_WR(bp, reg_addr, bp->aeu_mask);
2776 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2777 bp->attn_state &= ~deasserted;
2778 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2781 static void bnx2x_attn_int(struct bnx2x *bp)
2783 /* read local copy of bits */
2784 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2785 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2786 u32 attn_state = bp->attn_state;
2788 /* look for changed bits */
2789 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2790 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2793 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2794 attn_bits, attn_ack, asserted, deasserted);
2796 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2797 BNX2X_ERR("BAD attention state\n");
2799 /* handle bits that were raised */
2801 bnx2x_attn_int_asserted(bp, asserted);
2804 bnx2x_attn_int_deasserted(bp, deasserted);
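/* A bit-level example of the edge detection above (values made up):
 * with attn_bits = 0b0110, attn_ack = 0b0011 and attn_state = 0b0011,
 *
 *	asserted   = 0110 & ~0011 & ~0011 = 0100  (newly raised)
 *	deasserted = ~0110 & 0011 & 0011  = 0001  (newly cleared)
 *
 * so bit 2 is handled as a new assertion and bit 0 as a de-assertion,
 * while bit 1 (still set and already acked) is left alone.
 */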
2807 static void bnx2x_sp_task(struct work_struct *work)
2809 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2813 /* Return here if interrupt is disabled */
2814 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2815 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2819 status = bnx2x_update_dsb_idx(bp);
2820 /* if (status == 0) */
2821 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2823 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2829 /* CStorm events: query_stats, port delete ramrod */
2831 bp->stats_pending = 0;
2833 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2835 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2837 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2839 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2841 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2846 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2848 struct net_device *dev = dev_instance;
2849 struct bnx2x *bp = netdev_priv(dev);
2851 /* Return here if interrupt is disabled */
2852 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2853 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2857 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2859 #ifdef BNX2X_STOP_ON_ERROR
2860 if (unlikely(bp->panic))
2864 schedule_work(&bp->sp_task);
2869 /* end of slow path */
2873 /****************************************************************************
2875 ****************************************************************************/
2877 /* sum[hi:lo] += add[hi:lo] */
2878 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2881 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2884 /* difference = minuend - subtrahend */
2885 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2887 if (m_lo < s_lo) { \
2889 d_hi = m_hi - s_hi; \
2891 /* we can 'loan' 1 */ \
2893 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2895 /* m_hi <= s_hi */ \
2900 /* m_lo >= s_lo */ \
2901 if (m_hi < s_hi) { \
2905 /* m_hi >= s_hi */ \
2906 d_hi = m_hi - s_hi; \
2907 d_lo = m_lo - s_lo; \
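/* Worked example of the borrow path in DIFF_64 (illustrative values):
 * subtracting s = 0x0:FFFFFFFF from m = 0x1:00000005 has m_lo < s_lo,
 * so one is "loaned" from the high word:
 *
 *	d_hi = (1 - 0) - 1 = 0
 *	d_lo = 5 + (UINT_MAX - 0xFFFFFFFF) + 1 = 6
 *
 * i.e. 0x100000005 - 0xFFFFFFFF = 6, as expected.
 */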
2912 #define UPDATE_STAT64(s, t) \
2914 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2915 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2916 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2917 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2918 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2919 pstats->mac_stx[1].t##_lo, diff.lo); \
2922 #define UPDATE_STAT64_NIG(s, t) \
2924 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2925 diff.lo, new->s##_lo, old->s##_lo); \
2926 ADD_64(estats->t##_hi, diff.hi, \
2927 estats->t##_lo, diff.lo); \
2930 /* sum[hi:lo] += add */
2931 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2934 s_hi += (s_lo < a) ? 1 : 0; \
2937 #define UPDATE_EXTEND_STAT(s) \
2939 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2940 pstats->mac_stx[1].s##_lo, \
2944 #define UPDATE_EXTEND_TSTAT(s, t) \
2946 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2947 old_tclient->s = le32_to_cpu(tclient->s); \
2948 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2951 #define UPDATE_EXTEND_XSTAT(s, t) \
2953 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2954 old_xclient->s = le32_to_cpu(xclient->s); \
2955 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
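/* The UPDATE_EXTEND_* macros widen a free-running 32-bit firmware
 * counter into a 64-bit accumulator: take the unsigned delta since the
 * last snapshot (correct across a single 32-bit wrap), remember the
 * new value, and fold the delta into the 64-bit total.  For example,
 * old = 0xFFFFFFF0 and new = 0x00000010 give
 *
 *	diff = (u32)(0x00000010 - 0xFFFFFFF0) = 0x20
 *
 * i.e. the 32 events that actually occurred across the wrap.
 */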
2959 * General service functions
2962 static inline long bnx2x_hilo(u32 *hiref)
2964 u32 lo = *(hiref + 1);
2965 #if (BITS_PER_LONG == 64)
2968 return HILO_U64(hi, lo);
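/* The layout convention here is that each 64-bit statistic is stored
 * as a hi word immediately followed by its lo word, which is why
 * hiref + 1 addresses the low half.  On 64-bit builds the halves are
 * recombined, e.g. hi = 0x1, lo = 0x2 yields 0x100000002; on 32-bit
 * builds only the low half fits in a long and is returned instead.
 */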
2975 * Init service functions
2978 static void bnx2x_storm_stats_init(struct bnx2x *bp)
2980 int func = BP_FUNC(bp);
2982 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2983 REG_WR(bp, BAR_XSTRORM_INTMEM +
2984 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2986 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2987 REG_WR(bp, BAR_TSTRORM_INTMEM +
2988 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2990 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2991 REG_WR(bp, BAR_CSTRORM_INTMEM +
2992 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2994 REG_WR(bp, BAR_XSTRORM_INTMEM +
2995 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2996 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2997 REG_WR(bp, BAR_XSTRORM_INTMEM +
2998 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2999 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3001 REG_WR(bp, BAR_TSTRORM_INTMEM +
3002 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3003 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3004 REG_WR(bp, BAR_TSTRORM_INTMEM +
3005 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3006 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3009 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3011 if (!bp->stats_pending) {
3012 struct eth_query_ramrod_data ramrod_data = {0};
3015 ramrod_data.drv_counter = bp->stats_counter++;
3016 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3017 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3019 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3020 ((u32 *)&ramrod_data)[1],
3021 ((u32 *)&ramrod_data)[0], 0);
3023 /* stats ramrod has its own slot on the spq */
3025 bp->stats_pending = 1;
3030 static void bnx2x_stats_init(struct bnx2x *bp)
3032 int port = BP_PORT(bp);
3034 bp->executer_idx = 0;
3035 bp->stats_counter = 0;
3039 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3041 bp->port.port_stx = 0;
3042 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3044 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3045 bp->port.old_nig_stats.brb_discard =
3046 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3047 bp->port.old_nig_stats.brb_truncate =
3048 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3049 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3050 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3051 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3052 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3054 /* function stats */
3055 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3056 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3057 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3058 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3060 bp->stats_state = STATS_STATE_DISABLED;
3061 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3062 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3065 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3067 struct dmae_command *dmae = &bp->stats_dmae;
3068 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3070 *stats_comp = DMAE_COMP_VAL;
3073 if (bp->executer_idx) {
3074 int loader_idx = PMF_DMAE_C(bp);
3076 memset(dmae, 0, sizeof(struct dmae_command));
3078 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3079 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3080 DMAE_CMD_DST_RESET |
3082 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3084 DMAE_CMD_ENDIANITY_DW_SWAP |
3086 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3088 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3089 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3090 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3091 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3092 sizeof(struct dmae_command) *
3093 (loader_idx + 1)) >> 2;
3094 dmae->dst_addr_hi = 0;
3095 dmae->len = sizeof(struct dmae_command) >> 2;
3098 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3099 dmae->comp_addr_hi = 0;
3103 bnx2x_post_dmae(bp, dmae, loader_idx);
3105 } else if (bp->func_stx) {
3107 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3111 static int bnx2x_stats_comp(struct bnx2x *bp)
3113 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3117 while (*stats_comp != DMAE_COMP_VAL) {
3120 BNX2X_ERR("timeout waiting for stats finished\n");
3129 * Statistics service functions
3132 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3134 struct dmae_command *dmae;
3136 int loader_idx = PMF_DMAE_C(bp);
3137 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3140 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3141 BNX2X_ERR("BUG!\n");
3145 bp->executer_idx = 0;
3147 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3149 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3151 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3153 DMAE_CMD_ENDIANITY_DW_SWAP |
3155 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3156 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3158 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3159 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3160 dmae->src_addr_lo = bp->port.port_stx >> 2;
3161 dmae->src_addr_hi = 0;
3162 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3163 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3164 dmae->len = DMAE_LEN32_RD_MAX;
3165 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3166 dmae->comp_addr_hi = 0;
3169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3170 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3171 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3172 dmae->src_addr_hi = 0;
3173 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3174 DMAE_LEN32_RD_MAX * 4);
3175 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3176 DMAE_LEN32_RD_MAX * 4);
3177 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3178 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3179 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3180 dmae->comp_val = DMAE_COMP_VAL;
3183 bnx2x_hw_stats_post(bp);
3184 bnx2x_stats_comp(bp);
3187 static void bnx2x_port_stats_init(struct bnx2x *bp)
3189 struct dmae_command *dmae;
3190 int port = BP_PORT(bp);
3191 int vn = BP_E1HVN(bp);
3193 int loader_idx = PMF_DMAE_C(bp);
3195 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3198 if (!bp->link_vars.link_up || !bp->port.pmf) {
3199 BNX2X_ERR("BUG!\n");
3203 bp->executer_idx = 0;
3206 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3207 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3208 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3210 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3212 DMAE_CMD_ENDIANITY_DW_SWAP |
3214 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3215 (vn << DMAE_CMD_E1HVN_SHIFT));
3217 if (bp->port.port_stx) {
3219 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3220 dmae->opcode = opcode;
3221 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3222 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3223 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3224 dmae->dst_addr_hi = 0;
3225 dmae->len = sizeof(struct host_port_stats) >> 2;
3226 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3227 dmae->comp_addr_hi = 0;
3233 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3234 dmae->opcode = opcode;
3235 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3236 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3237 dmae->dst_addr_lo = bp->func_stx >> 2;
3238 dmae->dst_addr_hi = 0;
3239 dmae->len = sizeof(struct host_func_stats) >> 2;
3240 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3241 dmae->comp_addr_hi = 0;
3246 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3247 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3248 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3250 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3252 DMAE_CMD_ENDIANITY_DW_SWAP |
3254 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3255 (vn << DMAE_CMD_E1HVN_SHIFT));
3257 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3259 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3260 NIG_REG_INGRESS_BMAC0_MEM);
3262 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3263 BIGMAC_REGISTER_TX_STAT_GTBYT */
3264 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265 dmae->opcode = opcode;
3266 dmae->src_addr_lo = (mac_addr +
3267 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3268 dmae->src_addr_hi = 0;
3269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3270 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3271 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3272 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3273 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3274 dmae->comp_addr_hi = 0;
3277 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3278 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3279 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3280 dmae->opcode = opcode;
3281 dmae->src_addr_lo = (mac_addr +
3282 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3283 dmae->src_addr_hi = 0;
3284 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3285 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3287 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3288 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3289 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3290 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3291 dmae->comp_addr_hi = 0;
3294 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3296 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3298 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3299 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3300 dmae->opcode = opcode;
3301 dmae->src_addr_lo = (mac_addr +
3302 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3303 dmae->src_addr_hi = 0;
3304 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3305 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3306 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3307 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308 dmae->comp_addr_hi = 0;
3311 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = opcode;
3314 dmae->src_addr_lo = (mac_addr +
3315 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3316 dmae->src_addr_hi = 0;
3317 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3318 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3319 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3320 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3322 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323 dmae->comp_addr_hi = 0;
3326 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3327 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3328 dmae->opcode = opcode;
3329 dmae->src_addr_lo = (mac_addr +
3330 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3331 dmae->src_addr_hi = 0;
3332 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3333 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3334 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3335 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3336 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3337 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338 dmae->comp_addr_hi = 0;
3343 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3344 dmae->opcode = opcode;
3345 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3346 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3347 dmae->src_addr_hi = 0;
3348 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3349 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3350 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3351 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 dmae->comp_addr_hi = 0;
3355 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3356 dmae->opcode = opcode;
3357 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3358 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3359 dmae->src_addr_hi = 0;
3360 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3361 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3362 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3363 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3364 dmae->len = (2*sizeof(u32)) >> 2;
3365 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3366 dmae->comp_addr_hi = 0;
3369 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3370 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3371 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3372 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3374 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3376 DMAE_CMD_ENDIANITY_DW_SWAP |
3378 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3379 (vn << DMAE_CMD_E1HVN_SHIFT));
3380 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3381 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3382 dmae->src_addr_hi = 0;
3383 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3384 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3385 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3386 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3387 dmae->len = (2*sizeof(u32)) >> 2;
3388 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3389 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3390 dmae->comp_val = DMAE_COMP_VAL;
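/* Note the completion convention used across this chain: every
 * descriptor but the last completes to a GRC "go" register
 * (dmae_reg_go_c[...]), which kicks the engine on to the next command,
 * while only the final descriptor completes to host memory with
 * DMAE_COMP_VAL so that bnx2x_stats_comp() can poll for the end of the
 * whole sequence.
 */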
3395 static void bnx2x_func_stats_init(struct bnx2x *bp)
3397 struct dmae_command *dmae = &bp->stats_dmae;
3398 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3401 if (!bp->func_stx) {
3402 BNX2X_ERR("BUG!\n");
3406 bp->executer_idx = 0;
3407 memset(dmae, 0, sizeof(struct dmae_command));
3409 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3410 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3411 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3413 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3415 DMAE_CMD_ENDIANITY_DW_SWAP |
3417 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3418 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3419 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3420 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3421 dmae->dst_addr_lo = bp->func_stx >> 2;
3422 dmae->dst_addr_hi = 0;
3423 dmae->len = sizeof(struct host_func_stats) >> 2;
3424 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3425 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3426 dmae->comp_val = DMAE_COMP_VAL;
3431 static void bnx2x_stats_start(struct bnx2x *bp)
3434 bnx2x_port_stats_init(bp);
3436 else if (bp->func_stx)
3437 bnx2x_func_stats_init(bp);
3439 bnx2x_hw_stats_post(bp);
3440 bnx2x_storm_stats_post(bp);
3443 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3445 bnx2x_stats_comp(bp);
3446 bnx2x_stats_pmf_update(bp);
3447 bnx2x_stats_start(bp);
3450 static void bnx2x_stats_restart(struct bnx2x *bp)
3452 bnx2x_stats_comp(bp);
3453 bnx2x_stats_start(bp);
3456 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3458 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3459 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3460 struct regpair diff;
3462 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3463 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3464 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3465 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3466 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3467 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3468 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3469 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3470 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3471 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3472 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3473 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3474 UPDATE_STAT64(tx_stat_gt127,
3475 tx_stat_etherstatspkts65octetsto127octets);
3476 UPDATE_STAT64(tx_stat_gt255,
3477 tx_stat_etherstatspkts128octetsto255octets);
3478 UPDATE_STAT64(tx_stat_gt511,
3479 tx_stat_etherstatspkts256octetsto511octets);
3480 UPDATE_STAT64(tx_stat_gt1023,
3481 tx_stat_etherstatspkts512octetsto1023octets);
3482 UPDATE_STAT64(tx_stat_gt1518,
3483 tx_stat_etherstatspkts1024octetsto1522octets);
3484 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3485 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3486 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3487 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3488 UPDATE_STAT64(tx_stat_gterr,
3489 tx_stat_dot3statsinternalmactransmiterrors);
3490 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3493 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3495 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3496 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3498 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3499 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3500 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3501 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3502 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3503 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3504 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3505 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3506 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3507 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3508 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3509 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3510 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3511 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3512 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3513 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3514 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3515 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3516 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3517 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3518 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3519 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3520 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3523 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3524 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3525 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3526 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3527 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3528 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3531 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3533 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3534 struct nig_stats *old = &(bp->port.old_nig_stats);
3535 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3536 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3537 struct regpair diff;
3539 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3540 bnx2x_bmac_stats_update(bp);
3542 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3543 bnx2x_emac_stats_update(bp);
3545 else { /* unreached */
3546 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3550 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3551 new->brb_discard - old->brb_discard);
3552 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3553 new->brb_truncate - old->brb_truncate);
3555 UPDATE_STAT64_NIG(egress_mac_pkt0,
3556 etherstatspkts1024octetsto1522octets);
3557 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3559 memcpy(old, new, sizeof(struct nig_stats));
3561 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3562 sizeof(struct mac_stx));
3563 estats->brb_drop_hi = pstats->brb_drop_hi;
3564 estats->brb_drop_lo = pstats->brb_drop_lo;
3566 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
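/* host_port_stats_start/_end bracket the block like a small generation
 * counter: after a full update start == end, while a torn read can be
 * detected by a reader when they differ.  Incrementing end first and
 * copying it into start publishes a new complete generation, e.g.
 * end 5 -> 6, then start = 6.
 */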
3571 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3573 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3574 int cl_id = BP_CL_ID(bp);
3575 struct tstorm_per_port_stats *tport =
3576 &stats->tstorm_common.port_statistics;
3577 struct tstorm_per_client_stats *tclient =
3578 &stats->tstorm_common.client_statistics[cl_id];
3579 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3580 struct xstorm_per_client_stats *xclient =
3581 &stats->xstorm_common.client_statistics[cl_id];
3582 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3583 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3584 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3587 /* are storm stats valid? */
3588 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3589 bp->stats_counter) {
3590 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3591 " tstorm counter (%d) != stats_counter (%d)\n",
3592 tclient->stats_counter, bp->stats_counter);
3595 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3596 bp->stats_counter) {
3597 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3598 " xstorm counter (%d) != stats_counter (%d)\n",
3599 xclient->stats_counter, bp->stats_counter);
3603 fstats->total_bytes_received_hi =
3604 fstats->valid_bytes_received_hi =
3605 le32_to_cpu(tclient->total_rcv_bytes.hi);
3606 fstats->total_bytes_received_lo =
3607 fstats->valid_bytes_received_lo =
3608 le32_to_cpu(tclient->total_rcv_bytes.lo);
3610 estats->error_bytes_received_hi =
3611 le32_to_cpu(tclient->rcv_error_bytes.hi);
3612 estats->error_bytes_received_lo =
3613 le32_to_cpu(tclient->rcv_error_bytes.lo);
3614 ADD_64(estats->error_bytes_received_hi,
3615 estats->rx_stat_ifhcinbadoctets_hi,
3616 estats->error_bytes_received_lo,
3617 estats->rx_stat_ifhcinbadoctets_lo);
3619 ADD_64(fstats->total_bytes_received_hi,
3620 estats->error_bytes_received_hi,
3621 fstats->total_bytes_received_lo,
3622 estats->error_bytes_received_lo);
3624 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3625 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3626 total_multicast_packets_received);
3627 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3628 total_broadcast_packets_received);
3630 fstats->total_bytes_transmitted_hi =
3631 le32_to_cpu(xclient->total_sent_bytes.hi);
3632 fstats->total_bytes_transmitted_lo =
3633 le32_to_cpu(xclient->total_sent_bytes.lo);
3635 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3636 total_unicast_packets_transmitted);
3637 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3638 total_multicast_packets_transmitted);
3639 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3640 total_broadcast_packets_transmitted);
3642 memcpy(estats, &(fstats->total_bytes_received_hi),
3643 sizeof(struct host_func_stats) - 2*sizeof(u32));
3645 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3646 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3647 estats->brb_truncate_discard =
3648 le32_to_cpu(tport->brb_truncate_discard);
3649 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3651 old_tclient->rcv_unicast_bytes.hi =
3652 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3653 old_tclient->rcv_unicast_bytes.lo =
3654 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3655 old_tclient->rcv_broadcast_bytes.hi =
3656 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3657 old_tclient->rcv_broadcast_bytes.lo =
3658 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3659 old_tclient->rcv_multicast_bytes.hi =
3660 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3661 old_tclient->rcv_multicast_bytes.lo =
3662 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3663 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3665 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3666 old_tclient->packets_too_big_discard =
3667 le32_to_cpu(tclient->packets_too_big_discard);
3668 estats->no_buff_discard =
3669 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3670 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3672 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3673 old_xclient->unicast_bytes_sent.hi =
3674 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3675 old_xclient->unicast_bytes_sent.lo =
3676 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3677 old_xclient->multicast_bytes_sent.hi =
3678 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3679 old_xclient->multicast_bytes_sent.lo =
3680 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3681 old_xclient->broadcast_bytes_sent.hi =
3682 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3683 old_xclient->broadcast_bytes_sent.lo =
3684 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3686 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3691 static void bnx2x_net_stats_update(struct bnx2x *bp)
3693 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3694 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3695 struct net_device_stats *nstats = &bp->dev->stats;
3697 nstats->rx_packets =
3698 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3699 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3700 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3702 nstats->tx_packets =
3703 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3704 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3705 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3707 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3709 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3711 nstats->rx_dropped = old_tclient->checksum_discard +
3712 estats->mac_discard;
3713 nstats->tx_dropped = 0;
3716 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3718 nstats->collisions =
3719 estats->tx_stat_dot3statssinglecollisionframes_lo +
3720 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3721 estats->tx_stat_dot3statslatecollisions_lo +
3722 estats->tx_stat_dot3statsexcessivecollisions_lo;
3724 estats->jabber_packets_received =
3725 old_tclient->packets_too_big_discard +
3726 estats->rx_stat_dot3statsframestoolong_lo;
3728 nstats->rx_length_errors =
3729 estats->rx_stat_etherstatsundersizepkts_lo +
3730 estats->jabber_packets_received;
3731 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3732 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3733 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3734 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3735 nstats->rx_missed_errors = estats->xxoverflow_discard;
3737 nstats->rx_errors = nstats->rx_length_errors +
3738 nstats->rx_over_errors +
3739 nstats->rx_crc_errors +
3740 nstats->rx_frame_errors +
3741 nstats->rx_fifo_errors +
3742 nstats->rx_missed_errors;
3744 nstats->tx_aborted_errors =
3745 estats->tx_stat_dot3statslatecollisions_lo +
3746 estats->tx_stat_dot3statsexcessivecollisions_lo;
3747 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3748 nstats->tx_fifo_errors = 0;
3749 nstats->tx_heartbeat_errors = 0;
3750 nstats->tx_window_errors = 0;
3752 nstats->tx_errors = nstats->tx_aborted_errors +
3753 nstats->tx_carrier_errors;
3756 static void bnx2x_stats_update(struct bnx2x *bp)
3758 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3761 if (*stats_comp != DMAE_COMP_VAL)
3765 update = (bnx2x_hw_stats_update(bp) == 0);
3767 update |= (bnx2x_storm_stats_update(bp) == 0);
3770 bnx2x_net_stats_update(bp);
3773 if (bp->stats_pending) {
3774 bp->stats_pending++;
3775 if (bp->stats_pending == 3) {
3776 BNX2X_ERR("stats not updated for 3 times\n");
3783 if (bp->msglevel & NETIF_MSG_TIMER) {
3784 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3785 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3786 struct net_device_stats *nstats = &bp->dev->stats;
3789 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3790 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3792 bnx2x_tx_avail(bp->fp),
3793 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3794 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3796 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3797 bp->fp->rx_comp_cons),
3798 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3799 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3800 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3801 estats->driver_xoff, estats->brb_drop_lo);
3802 printk(KERN_DEBUG "tstats: checksum_discard %u "
3803 "packets_too_big_discard %u no_buff_discard %u "
3804 "mac_discard %u mac_filter_discard %u "
3805 "xxovrflow_discard %u brb_truncate_discard %u "
3806 "ttl0_discard %u\n",
3807 old_tclient->checksum_discard,
3808 old_tclient->packets_too_big_discard,
3809 old_tclient->no_buff_discard, estats->mac_discard,
3810 estats->mac_filter_discard, estats->xxoverflow_discard,
3811 estats->brb_truncate_discard,
3812 old_tclient->ttl0_discard);
3814 for_each_queue(bp, i) {
3815 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3816 bnx2x_fp(bp, i, tx_pkt),
3817 bnx2x_fp(bp, i, rx_pkt),
3818 bnx2x_fp(bp, i, rx_calls));
3822 bnx2x_hw_stats_post(bp);
3823 bnx2x_storm_stats_post(bp);
3826 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3828 struct dmae_command *dmae;
3830 int loader_idx = PMF_DMAE_C(bp);
3831 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3833 bp->executer_idx = 0;
3835 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3837 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3839 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3841 DMAE_CMD_ENDIANITY_DW_SWAP |
3843 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3844 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3846 if (bp->port.port_stx) {
3848 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3850 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3852 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3853 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3854 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3855 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3856 dmae->dst_addr_hi = 0;
3857 dmae->len = sizeof(struct host_port_stats) >> 2;
3859 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3860 dmae->comp_addr_hi = 0;
3863 dmae->comp_addr_lo =
3864 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3865 dmae->comp_addr_hi =
3866 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3867 dmae->comp_val = DMAE_COMP_VAL;
3875 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3876 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3877 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3878 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3879 dmae->dst_addr_lo = bp->func_stx >> 2;
3880 dmae->dst_addr_hi = 0;
3881 dmae->len = sizeof(struct host_func_stats) >> 2;
3882 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3883 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3884 dmae->comp_val = DMAE_COMP_VAL;
3890 static void bnx2x_stats_stop(struct bnx2x *bp)
3894 bnx2x_stats_comp(bp);
3897 update = (bnx2x_hw_stats_update(bp) == 0);
3899 update |= (bnx2x_storm_stats_update(bp) == 0);
3902 bnx2x_net_stats_update(bp);
3905 bnx2x_port_stats_stop(bp);
3907 bnx2x_hw_stats_post(bp);
3908 bnx2x_stats_comp(bp);
3912 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3916 static const struct {
3917 void (*action)(struct bnx2x *bp);
3918 enum bnx2x_stats_state next_state;
3919 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3922 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3923 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3924 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3925 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3928 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3929 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3930 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3931 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3935 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3937 enum bnx2x_stats_state state = bp->stats_state;
3939 bnx2x_stats_stm[state][event].action(bp);
3940 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3942 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3943 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3944 state, event, bp->stats_state);
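/* Example of the table dispatch: in state STATS_STATE_ENABLED a
 * STATS_EVENT_STOP runs bnx2x_stats_stop() and moves the machine to
 * STATS_STATE_DISABLED, exactly as listed in the row above; no
 * conditional logic is needed beyond the two array lookups.
 */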
3947 static void bnx2x_timer(unsigned long data)
3949 struct bnx2x *bp = (struct bnx2x *) data;
3951 if (!netif_running(bp->dev))
3954 if (atomic_read(&bp->intr_sem) != 0)
3958 struct bnx2x_fastpath *fp = &bp->fp[0];
3961 bnx2x_tx_int(fp, 1000);
3962 rc = bnx2x_rx_int(fp, 1000);
3965 if (!BP_NOMCP(bp)) {
3966 int func = BP_FUNC(bp);
3970 ++bp->fw_drv_pulse_wr_seq;
3971 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3972 /* TBD - add SYSTEM_TIME */
3973 drv_pulse = bp->fw_drv_pulse_wr_seq;
3974 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3976 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3977 MCP_PULSE_SEQ_MASK);
3978 /* The delta between driver pulse and mcp response
3979 * should be 1 (before mcp response) or 0 (after mcp response)
3981 if ((drv_pulse != mcp_pulse) &&
3982 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3983 /* someone lost a heartbeat... */
3984 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3985 drv_pulse, mcp_pulse);
3989 if ((bp->state == BNX2X_STATE_OPEN) ||
3990 (bp->state == BNX2X_STATE_DISABLED))
3991 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3994 mod_timer(&bp->timer, jiffies + bp->current_interval);
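/* Heartbeat example for the pulse check above: with drv_pulse 0x34 the
 * driver accepts mcp_pulse 0x34 (MCP has already answered) or 0x33
 * (answer still in flight, i.e. a delta of 1 modulo the sequence
 * mask); any other value means a lost heartbeat and is logged.
 */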
3997 /* end of Statistics */
4002 * nic init service functions
4005 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4007 int port = BP_PORT(bp);
4009 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4010 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4011 sizeof(struct ustorm_def_status_block)/4);
4012 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4013 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4014 sizeof(struct cstorm_def_status_block)/4);
4017 static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
4018 struct host_status_block *sb, dma_addr_t mapping)
4020 int port = BP_PORT(bp);
4021 int func = BP_FUNC(bp);
4026 section = ((u64)mapping) + offsetof(struct host_status_block,
4028 sb->u_status_block.status_block_id = sb_id;
4030 REG_WR(bp, BAR_USTRORM_INTMEM +
4031 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4032 REG_WR(bp, BAR_USTRORM_INTMEM +
4033 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4035 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4036 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4038 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4039 REG_WR16(bp, BAR_USTRORM_INTMEM +
4040 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4043 section = ((u64)mapping) + offsetof(struct host_status_block,
4045 sb->c_status_block.status_block_id = sb_id;
4047 REG_WR(bp, BAR_CSTRORM_INTMEM +
4048 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4049 REG_WR(bp, BAR_CSTRORM_INTMEM +
4050 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4052 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4053 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4055 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4056 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4057 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4059 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4062 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4064 int func = BP_FUNC(bp);
4066 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4067 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4068 sizeof(struct ustorm_def_status_block)/4);
4069 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4070 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4071 sizeof(struct cstorm_def_status_block)/4);
4072 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4073 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4074 sizeof(struct xstorm_def_status_block)/4);
4075 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4076 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4077 sizeof(struct tstorm_def_status_block)/4);
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->def_att_idx = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
						reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
						reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
						reg_offset + 0xc + 0x10*index);
	}

	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					  MISC_REG_AEU_MASK_ATTN_FUNC_0));

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
	       BNX2X_BTR);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
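
/*
 * Note: rx_ticks/tx_ticks are the interrupt coalescing timeouts.  A
 * value of 0 disables host coalescing for that index, which is why
 * the HC_DISABLE flag below is written as "ticks ? 0 : 1"; the
 * timeout itself is scaled down (/12 below) to the hardware's tick
 * granularity, presumably 12 usec per tick.
 */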
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						   HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						   HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						   HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						   HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks ? 0 : 1);
	}
}
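
/*
 * Note: a TPA bin is only DMA-mapped while an aggregation is in
 * progress (BNX2X_TPA_START) - hence the conditional unmap below;
 * a bin in BNX2X_TPA_STOP merely holds a pre-allocated skb.
 */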
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
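
/*
 * Note on ring geometry: each ring is a chain of BCM_PAGE_SIZE pages,
 * and the last descriptor slot(s) of every page hold a "next page"
 * pointer rather than a real BD - hence the "RX_DESC_CNT * i - 2"
 * style indexing below, and NEXT_RX_IDX()/NEXT_SGE_IDX()/NEXT_RCQ_IDX()
 * skipping over those slots when a producer is advanced.
 */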
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod, cqe_ring_prod = 0;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
				struct bnx2x_fastpath *fp = &bp->fp[j];

				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp,
					      ETH_MAX_AGGREGATION_QUEUES_E1H);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						HC_INDEX_C_ETH_TX_CQ_CONS;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
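
/*
 * Note: the TSTORM indirection table spreads the RSS hash result over
 * the active queues round-robin via "i % bp->num_queues" below; e.g.
 * with num_queues == 4 the table entries map to queues 0,1,2,3,0,1,...
 */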
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

/*	DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
	   (*(u32 *)&tstorm_config)); */

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + 4*i, 0);

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u16 max_agg_size;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		max_agg_size = min((u32)(bp->rx_buf_use_size +
					 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
				   (u32)0xffff);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}
static void bnx2x_nic_init(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
			      fp->status_blk_mapping);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, DEF_SB_ID);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp);
	bnx2x_storm_stats_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */
/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
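
/*
 * Note: bnx2x_gunzip() below expects a standard gzip member (RFC 1952):
 * bytes 0-1 are the magic 0x1f 0x8b, byte 2 the compression method
 * (Z_DEFLATED == 8) and byte 3 the flags.  If FNAME is set, a
 * NUL-terminated file name follows the fixed 10 byte header and is
 * skipped before the raw deflate stream is handed to zlib_inflate();
 * inflateInit2 with -MAX_WBITS means "no zlib/gzip framing".
 */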
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
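
/*
 * Note: the two writes above emit one minimal packet through the NIG
 * debug interface - a 16 byte frame (hence the 0x10 packet size the
 * memory test below waits for) built from a dummy address pattern and
 * a non-IP ethertype, framed by the SOP/EOP control words.
 */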
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do I reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tmp = {0};

		tmp.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tmp)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tmp)[1]);
	}

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *            bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port,
					     wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a valid bit (1)
   added as bit 53
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
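
/*
 * Example: for a (hypothetical) DMA address 0x0000001234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (address bits 12..43) and
 * ONCHIP_ADDR2() yields 0x00100000 - just the valid bit, since
 * address bits 44+ are all zero here.  PXP_ONE_ILT(x) packs the same
 * ILT index into both the "first" and "last" fields of the range
 * register.
 */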
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		rc = 0;
	}

	return rc;
}
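
/*
 * Note: the low bits of the mailbox word carry a sequence number
 * (FW_MSG_SEQ_NUMBER_MASK) that the MCP echoes back, so a stale reply
 * to an earlier command cannot be mistaken for the current one; the
 * loop above polls for up to ~2 seconds before declaring the firmware
 * unresponsive.
 */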
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
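
/*
 * Note: BNX2X_PCI_ALLOC/BNX2X_ALLOC jump to alloc_mem_err on failure,
 * where bnx2x_free_mem() runs; since the free macros check each
 * pointer for NULL (and the alloc macros zero what they return), a
 * partially completed allocation unwinds safely.
 */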
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp,
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	bp->msix_table[0].entry = 0;

	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n",
				  i + offset, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
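
/*
 * Note: with MSI-X, vector 0 is dedicated to the slowpath and vectors
 * 1..num_queues to the fastpath rings; with INT#A a single shared
 * handler (bnx2x_interrupt) demultiplexes instead, which is why only
 * fp[0]'s state is marked as having an IRQ in that mode.
 */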
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	config->config_table[0].target_table_entry.flags = 0;
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
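
/*
 * Note: dev_addr[] is a byte array in wire order; the CAM entry
 * stores the address as three 16 bit words with the bytes swapped
 * within each word, which is what the swab16() calls above produce.
 */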
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	config->config_table[0].flags = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}
		mb(); /* state is changed by bnx2x_sp_event() */

		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}
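
/*
 * Note: ramrods are posted on the slowpath queue and complete as
 * events on a status block; bnx2x_wait_ramrod() sleeps (or polls the
 * Rx ring when interrupts are not usable) until bnx2x_sp_event()
 * flips the watched state variable.
 */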
6207 static int bnx2x_poll(struct napi_struct *napi, int budget);
6208 static void bnx2x_set_rx_mode(struct net_device *dev);
6210 /* must be called with rtnl_lock */
6211 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6216 #ifdef BNX2X_STOP_ON_ERROR
6217 if (unlikely(bp->panic))
6221 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6223 /* Send LOAD_REQUEST command to MCP
6224 Returns the type of LOAD command:
6225 if it is the first port to be initialized
6226 common blocks should be initialized, otherwise - not
6228 if (!BP_NOMCP(bp)) {
6229 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6231 BNX2X_ERR("MCP response failure, unloading\n");
6234 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6235 return -EBUSY; /* other port in diagnostic mode */
6238 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6239 load_count[0], load_count[1], load_count[2]);
6241 load_count[1 + BP_PORT(bp)]++;
6242 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6243 load_count[0], load_count[1], load_count[2]);
6244 if (load_count[0] == 1)
6245 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6246 else if (load_count[1 + BP_PORT(bp)] == 1)
6247 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6249 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6252 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6253 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6257 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6259 /* if we can't use MSI-X we only need one fp,
6260 * so try to enable MSI-X with the requested number of fp's
6261 * and fallback to inta with one fp
6267 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6268 /* user requested number */
6269 bp->num_queues = use_multi;
6272 bp->num_queues = min_t(u32, num_online_cpus(),
6277 if (bnx2x_enable_msix(bp)) {
6278 /* failed to enable MSI-X */
6281 BNX2X_ERR("Multi requested but failed"
6282 " to enable MSI-X\n");
6286 "set number of queues to %d\n", bp->num_queues);
6288 if (bnx2x_alloc_mem(bp))
6291 for_each_queue(bp, i)
6292 bnx2x_fp(bp, i, disable_tpa) =
6293 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6295 /* Disable interrupt handling until HW is initialized */
6296 atomic_set(&bp->intr_sem, 1);
6298 if (bp->flags & USING_MSIX_FLAG) {
6299 rc = bnx2x_req_msix_irqs(bp);
6301 pci_disable_msix(bp->pdev);
6306 rc = bnx2x_req_irq(bp);
6308 BNX2X_ERR("IRQ request failed, aborting\n");
6313 for_each_queue(bp, i)
6314 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6318 rc = bnx2x_init_hw(bp, load_code);
6320 BNX2X_ERR("HW init failed, aborting\n");
6324 /* Enable interrupt handling */
6325 atomic_set(&bp->intr_sem, 0);
6327 /* Setup NIC internals and enable interrupts */
6330 /* Send LOAD_DONE command to MCP */
6331 if (!BP_NOMCP(bp)) {
6332 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6334 BNX2X_ERR("MCP response failure, unloading\n");
6336 goto load_int_disable;
6340 bnx2x_stats_init(bp);
6342 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6344 /* Enable Rx interrupt handling before sending the ramrod
6345 as it's completed on Rx FP queue */
6346 for_each_queue(bp, i)
6347 napi_enable(&bnx2x_fp(bp, i, napi));
6349 rc = bnx2x_setup_leading(bp);
6351 #ifdef BNX2X_STOP_ON_ERROR
6354 goto load_stop_netif;
6357 if (CHIP_IS_E1H(bp))
6358 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6359 BNX2X_ERR("!!! mf_cfg function disabled\n");
6360 bp->state = BNX2X_STATE_DISABLED;
6363 if (bp->state == BNX2X_STATE_OPEN)
6364 for_each_nondefault_queue(bp, i) {
6365 rc = bnx2x_setup_multi(bp, i);
6367 goto load_stop_netif;
6371 bnx2x_set_mac_addr_e1(bp);
6373 bnx2x_set_mac_addr_e1h(bp);
6376 bnx2x_initial_phy_init(bp);
6378 /* Start fast path */
6379 switch (load_mode) {
6381 /* Tx queue should only be re-enabled */
6382 netif_wake_queue(bp->dev);
6383 bnx2x_set_rx_mode(bp->dev);
6387 /* IRQ is only requested from bnx2x_open */
6388 netif_start_queue(bp->dev);
6389 bnx2x_set_rx_mode(bp->dev);
6390 if (bp->flags & USING_MSIX_FLAG)
6391 printk(KERN_INFO PFX "%s: using MSI-X\n",
6396 bnx2x_set_rx_mode(bp->dev);
6397 bp->state = BNX2X_STATE_DIAG;
6405 bnx2x__link_status_update(bp);
6407 /* start the timer */
6408 mod_timer(&bp->timer, jiffies + bp->current_interval);
6414 for_each_queue(bp, i)
6415 napi_disable(&bnx2x_fp(bp, i, napi));
6418 bnx2x_int_disable_sync(bp);
6423 /* Free SKBs, SGEs, TPA pool and driver internals */
6424 bnx2x_free_skbs(bp);
6425 for_each_queue(bp, i)
6426 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6427 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6431 /* TBD: we really need to reset the chip
6432 if we want to recover from this */
6436 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6440 /* halt the connection */
6441 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6442 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6444 /* Wait for completion */
6445 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6446 &(bp->fp[index].state), 1);
6447 if (rc) /* timeout */
6450 /* delete cfc entry */
6451 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6453 /* Wait for completion */
6454 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6455 &(bp->fp[index].state), 1);
6459 static void bnx2x_stop_leading(struct bnx2x *bp)
6461 u16 dsb_sp_prod_idx;
6462 /* if the other port is handling traffic,
6463 this can take a lot of time */
6469 /* Send HALT ramrod */
6470 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6471 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6473 /* Wait for completion */
6474 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6475 &(bp->fp[0].state), 1);
6476 if (rc) /* timeout */
6479 dsb_sp_prod_idx = *bp->dsb_sp_prod;
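/* Snapshot the default status block producer first: PORT_DEL has no
 * per-queue state to wait on, so its completion is detected by
 * watching this producer advance.
 */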
6481 /* Send PORT_DELETE ramrod */
6482 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6484 /* Wait for the completion to arrive on the default status block;
6485 we are going to reset the chip anyway,
6486 so there is not much to do if this times out
6488 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6491 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6492 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6493 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6494 #ifdef BNX2X_STOP_ON_ERROR
6501 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6502 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6505 static void bnx2x_reset_func(struct bnx2x *bp)
6507 int port = BP_PORT(bp);
6508 int func = BP_FUNC(bp);
6512 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6513 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6515 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6518 base = FUNC_ILT_BASE(func);
6519 for (i = base; i < base + ILT_PER_FUNC; i++)
6520 bnx2x_ilt_wr(bp, i, 0);
6523 static void bnx2x_reset_port(struct bnx2x *bp)
6525 int port = BP_PORT(bp);
6528 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6530 /* Do not rcv packets to BRB */
6531 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6532 /* Do not direct rcv packets that are not for MCP to the BRB */
6533 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6534 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6537 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6540 /* Check for BRB port occupancy */
6541 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6543 DP(NETIF_MSG_IFDOWN,
6544 "BRB1 is not empty %d blooks are occupied\n", val);
6546 /* TODO: Close Doorbell port? */
6549 static void bnx2x_reset_common(struct bnx2x *bp)
6552 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6557 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6559 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6560 BP_FUNC(bp), reset_code);
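/* The reset code from the MCP defines the reset scope:
 * UNLOAD_COMMON resets the function, the port and the common blocks,
 * UNLOAD_PORT resets the function and the port, and UNLOAD_FUNCTION
 * resets only the function itself.
 */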
6562 switch (reset_code) {
6563 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6564 bnx2x_reset_port(bp);
6565 bnx2x_reset_func(bp);
6566 bnx2x_reset_common(bp);
6569 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6570 bnx2x_reset_port(bp);
6571 bnx2x_reset_func(bp);
6574 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6575 bnx2x_reset_func(bp);
6579 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6584 /* must be called with rtnl_lock */
6585 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6590 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6592 bp->rx_mode = BNX2X_RX_MODE_NONE;
6593 bnx2x_set_storm_rx_mode(bp);
6595 if (netif_running(bp->dev)) {
6596 netif_tx_disable(bp->dev);
6597 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6600 del_timer_sync(&bp->timer);
6601 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6602 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6603 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6605 /* Wait until all fast path tasks complete */
6606 for_each_queue(bp, i) {
6607 struct bnx2x_fastpath *fp = &bp->fp[i];
6609 #ifdef BNX2X_STOP_ON_ERROR
6610 #ifdef __powerpc64__
6611 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6613 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6615 fp->tpa_queue_used);
6619 while (bnx2x_has_work(fp)) {
6622 BNX2X_ERR("timeout waiting for queue[%d]\n",
6624 #ifdef BNX2X_STOP_ON_ERROR
6636 /* Wait until all slow path tasks complete */
6638 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6641 for_each_queue(bp, i)
6642 napi_disable(&bnx2x_fp(bp, i, napi));
6643 /* Disable interrupts after Tx and Rx are disabled on stack level */
6644 bnx2x_int_disable_sync(bp);
6649 if (bp->flags & NO_WOL_FLAG)
6650 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6653 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6654 u8 *mac_addr = bp->dev->dev_addr;
6657 /* The MAC address is written to entries 1-4 to
6658 preserve entry 0, which is used by the PMF */
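/* Each MAC_MATCH entry is a pair of 32-bit registers 8 bytes apart:
 * the first holds bytes 0-1 of the address in its low 16 bits, the
 * second holds bytes 2-5.  E.g. a hypothetical address
 * 00:10:18:aa:bb:cc would be written as 0x00000010 and 0x18aabbcc.
 */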
6659 val = (mac_addr[0] << 8) | mac_addr[1];
6660 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
6662 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6663 (mac_addr[4] << 8) | mac_addr[5];
6664 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6667 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6670 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6672 /* Close multi and leading connections
6673 Completions for ramrods are collected in a synchronous way */
6674 for_each_nondefault_queue(bp, i)
6675 if (bnx2x_stop_multi(bp, i))
6678 if (CHIP_IS_E1H(bp))
6679 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6681 bnx2x_stop_leading(bp);
6682 #ifdef BNX2X_STOP_ON_ERROR
6683 /* If ramrod completion timed out - break here! */
6685 BNX2X_ERR("Stop leading failed!\n");
6690 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6691 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6692 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6693 "state 0x%x fp[0].state 0x%x\n",
6694 bp->state, bp->fp[0].state);
6699 reset_code = bnx2x_fw_command(bp, reset_code);
6701 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6702 load_count[0], load_count[1], load_count[2]);
6704 load_count[1 + BP_PORT(bp)]--;
6705 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6706 load_count[0], load_count[1], load_count[2]);
6707 if (load_count[0] == 0)
6708 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6709 else if (load_count[1 + BP_PORT(bp)] == 0)
6710 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6712 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6715 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6716 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6717 bnx2x__link_reset(bp);
6719 /* Reset the chip */
6720 bnx2x_reset_chip(bp, reset_code);
6722 /* Report UNLOAD_DONE to MCP */
6724 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6726 /* Free SKBs, SGEs, TPA pool and driver internals */
6727 bnx2x_free_skbs(bp);
6728 for_each_queue(bp, i)
6729 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6730 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6733 bp->state = BNX2X_STATE_CLOSED;
6735 netif_carrier_off(bp->dev);
6740 static void bnx2x_reset_task(struct work_struct *work)
6742 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6744 #ifdef BNX2X_STOP_ON_ERROR
6745 BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
6746 " so the reset is skipped to allow a debug dump,\n"
6747 KERN_ERR " you will need to reboot when done\n");
6753 if (!netif_running(bp->dev))
6754 goto reset_task_exit;
6756 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6757 bnx2x_nic_load(bp, LOAD_NORMAL);
6763 /* end of nic load/unload */
6768 * Init service functions
6771 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6775 /* Check if there is any driver already loaded */
6776 val = REG_RD(bp, MISC_REG_UNPREPARED);
6778 /* Check if it is the UNDI driver
6779 * UNDI driver initializes CID offset for normal bell to 0x7
6781 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6783 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6784 /* save our func and fw_seq */
6785 int func = BP_FUNC(bp);
6786 u16 fw_seq = bp->fw_seq;
6788 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6790 /* try to unload UNDI on port 0 */
6792 bp->fw_seq = (SHMEM_RD(bp,
6793 func_mb[bp->func].drv_mb_header) &
6794 DRV_MSG_SEQ_NUMBER_MASK);
6796 reset_code = bnx2x_fw_command(bp, reset_code);
6797 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6799 /* if UNDI is loaded on the other port */
6800 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6803 bp->fw_seq = (SHMEM_RD(bp,
6804 func_mb[bp->func].drv_mb_header) &
6805 DRV_MSG_SEQ_NUMBER_MASK);
6807 bnx2x_fw_command(bp,
6808 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6809 bnx2x_fw_command(bp,
6810 DRV_MSG_CODE_UNLOAD_DONE);
6812 /* restore our func and fw_seq */
6814 bp->fw_seq = fw_seq;
6819 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6822 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6828 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6830 u32 val, val2, val3, val4, id;
6832 /* Get the chip revision id and number. */
6833 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6834 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6835 id = ((val & 0xffff) << 16);
6836 val = REG_RD(bp, MISC_REG_CHIP_REV);
6837 id |= ((val & 0xf) << 12);
6838 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6839 id |= ((val & 0xff) << 4);
6840 val = REG_RD(bp, MISC_REG_BOND_ID);
6841 id |= (val & 0xf);
6842 bp->common.chip_id = id;
6843 bp->link_params.chip_id = bp->common.chip_id;
6844 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
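/* e.g. a chip num of 0x164e (57710) with rev, metal and bond_id all
 * zero yields a chip_id of 0x164e0000 */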
6846 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6847 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6848 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6849 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6850 bp->common.flash_size, bp->common.flash_size);
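/* the CFG4 size field is a shift count applied to NVRAM_1MB_SIZE
 * (one megabit expressed in bytes), so a field value of 2, for
 * example, means a 4Mbit flash part */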
6852 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6853 bp->link_params.shmem_base = bp->common.shmem_base;
6854 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6856 if (!bp->common.shmem_base ||
6857 (bp->common.shmem_base < 0xA0000) ||
6858 (bp->common.shmem_base >= 0xC0000)) {
6859 BNX2X_DEV_INFO("MCP not active\n");
6860 bp->flags |= NO_MCP_FLAG;
6864 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6865 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6866 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6867 BNX2X_ERR("BAD MCP validity signature\n");
6869 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6870 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6872 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6873 bp->common.hw_config, bp->common.board);
6875 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6876 SHARED_HW_CFG_LED_MODE_MASK) >>
6877 SHARED_HW_CFG_LED_MODE_SHIFT);
6879 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6880 bp->common.bc_ver = val;
6881 BNX2X_DEV_INFO("bc_ver %X\n", val);
6882 if (val < BNX2X_BC_VER) {
6883 /* for now only warn
6884 * later we might need to enforce this */
6885 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6886 " please upgrade BC\n", BNX2X_BC_VER, val);
6888 BNX2X_DEV_INFO("%sWoL Capable\n",
6889 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6891 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6892 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6893 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6894 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6896 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6897 val, val2, val3, val4);
6900 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6903 int port = BP_PORT(bp);
6906 switch (switch_cfg) {
6908 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6911 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6912 switch (ext_phy_type) {
6913 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6914 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6917 bp->port.supported |= (SUPPORTED_10baseT_Half |
6918 SUPPORTED_10baseT_Full |
6919 SUPPORTED_100baseT_Half |
6920 SUPPORTED_100baseT_Full |
6921 SUPPORTED_1000baseT_Full |
6922 SUPPORTED_2500baseX_Full |
6927 SUPPORTED_Asym_Pause);
6930 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6931 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6934 bp->port.supported |= (SUPPORTED_10baseT_Half |
6935 SUPPORTED_10baseT_Full |
6936 SUPPORTED_100baseT_Half |
6937 SUPPORTED_100baseT_Full |
6938 SUPPORTED_1000baseT_Full |
6943 SUPPORTED_Asym_Pause);
6947 BNX2X_ERR("NVRAM config error. "
6948 "BAD SerDes ext_phy_config 0x%x\n",
6949 bp->link_params.ext_phy_config);
6953 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6955 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6958 case SWITCH_CFG_10G:
6959 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6962 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6963 switch (ext_phy_type) {
6964 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6965 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6968 bp->port.supported |= (SUPPORTED_10baseT_Half |
6969 SUPPORTED_10baseT_Full |
6970 SUPPORTED_100baseT_Half |
6971 SUPPORTED_100baseT_Full |
6972 SUPPORTED_1000baseT_Full |
6973 SUPPORTED_2500baseX_Full |
6974 SUPPORTED_10000baseT_Full |
6979 SUPPORTED_Asym_Pause);
6982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6983 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6986 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6989 SUPPORTED_Asym_Pause);
6992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6993 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6996 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6997 SUPPORTED_1000baseT_Full |
7000 SUPPORTED_Asym_Pause);
7003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7004 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7007 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7008 SUPPORTED_1000baseT_Full |
7012 SUPPORTED_Asym_Pause);
7015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7016 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7019 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7020 SUPPORTED_2500baseX_Full |
7021 SUPPORTED_1000baseT_Full |
7025 SUPPORTED_Asym_Pause);
7028 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7029 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7032 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7036 SUPPORTED_Asym_Pause);
7039 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7040 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7041 bp->link_params.ext_phy_config);
7045 BNX2X_ERR("NVRAM config error. "
7046 "BAD XGXS ext_phy_config 0x%x\n",
7047 bp->link_params.ext_phy_config);
7051 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7053 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7058 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7059 bp->port.link_config);
7062 bp->link_params.phy_addr = bp->port.phy_addr;
7064 /* mask what we support according to speed_cap_mask */
7065 if (!(bp->link_params.speed_cap_mask &
7066 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7067 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7069 if (!(bp->link_params.speed_cap_mask &
7070 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7071 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7073 if (!(bp->link_params.speed_cap_mask &
7074 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7075 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7077 if (!(bp->link_params.speed_cap_mask &
7078 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7079 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7081 if (!(bp->link_params.speed_cap_mask &
7082 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7083 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7084 SUPPORTED_1000baseT_Full);
7086 if (!(bp->link_params.speed_cap_mask &
7087 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7088 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7090 if (!(bp->link_params.speed_cap_mask &
7091 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7092 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7094 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7097 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7099 bp->link_params.req_duplex = DUPLEX_FULL;
7101 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7102 case PORT_FEATURE_LINK_SPEED_AUTO:
7103 if (bp->port.supported & SUPPORTED_Autoneg) {
7104 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7105 bp->port.advertising = bp->port.supported;
7108 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7110 if ((ext_phy_type ==
7111 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7113 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7114 /* force 10G, no AN */
7115 bp->link_params.req_line_speed = SPEED_10000;
7116 bp->port.advertising =
7117 (ADVERTISED_10000baseT_Full |
7121 BNX2X_ERR("NVRAM config error. "
7122 "Invalid link_config 0x%x"
7123 " Autoneg not supported\n",
7124 bp->port.link_config);
7129 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7130 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7131 bp->link_params.req_line_speed = SPEED_10;
7132 bp->port.advertising = (ADVERTISED_10baseT_Full |
7135 BNX2X_ERR("NVRAM config error. "
7136 "Invalid link_config 0x%x"
7137 " speed_cap_mask 0x%x\n",
7138 bp->port.link_config,
7139 bp->link_params.speed_cap_mask);
7144 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7145 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7146 bp->link_params.req_line_speed = SPEED_10;
7147 bp->link_params.req_duplex = DUPLEX_HALF;
7148 bp->port.advertising = (ADVERTISED_10baseT_Half |
7151 BNX2X_ERR("NVRAM config error. "
7152 "Invalid link_config 0x%x"
7153 " speed_cap_mask 0x%x\n",
7154 bp->port.link_config,
7155 bp->link_params.speed_cap_mask);
7160 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7161 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7162 bp->link_params.req_line_speed = SPEED_100;
7163 bp->port.advertising = (ADVERTISED_100baseT_Full |
7166 BNX2X_ERR("NVRAM config error. "
7167 "Invalid link_config 0x%x"
7168 " speed_cap_mask 0x%x\n",
7169 bp->port.link_config,
7170 bp->link_params.speed_cap_mask);
7175 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7176 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7177 bp->link_params.req_line_speed = SPEED_100;
7178 bp->link_params.req_duplex = DUPLEX_HALF;
7179 bp->port.advertising = (ADVERTISED_100baseT_Half |
7182 BNX2X_ERR("NVRAM config error. "
7183 "Invalid link_config 0x%x"
7184 " speed_cap_mask 0x%x\n",
7185 bp->port.link_config,
7186 bp->link_params.speed_cap_mask);
7191 case PORT_FEATURE_LINK_SPEED_1G:
7192 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7193 bp->link_params.req_line_speed = SPEED_1000;
7194 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7197 BNX2X_ERR("NVRAM config error. "
7198 "Invalid link_config 0x%x"
7199 " speed_cap_mask 0x%x\n",
7200 bp->port.link_config,
7201 bp->link_params.speed_cap_mask);
7206 case PORT_FEATURE_LINK_SPEED_2_5G:
7207 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7208 bp->link_params.req_line_speed = SPEED_2500;
7209 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7212 BNX2X_ERR("NVRAM config error. "
7213 "Invalid link_config 0x%x"
7214 " speed_cap_mask 0x%x\n",
7215 bp->port.link_config,
7216 bp->link_params.speed_cap_mask);
7221 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7222 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7223 case PORT_FEATURE_LINK_SPEED_10G_KR:
7224 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7225 bp->link_params.req_line_speed = SPEED_10000;
7226 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7229 BNX2X_ERR("NVRAM config error. "
7230 "Invalid link_config 0x%x"
7231 " speed_cap_mask 0x%x\n",
7232 bp->port.link_config,
7233 bp->link_params.speed_cap_mask);
7239 BNX2X_ERR("NVRAM config error. "
7240 "BAD link speed link_config 0x%x\n",
7241 bp->port.link_config);
7242 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7243 bp->port.advertising = bp->port.supported;
7247 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7248 PORT_FEATURE_FLOW_CONTROL_MASK);
7249 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7250 !(bp->port.supported & SUPPORTED_Autoneg))
7251 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7253 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7254 " advertising 0x%x\n",
7255 bp->link_params.req_line_speed,
7256 bp->link_params.req_duplex,
7257 bp->link_params.req_flow_ctrl, bp->port.advertising);
7260 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7262 int port = BP_PORT(bp);
7265 bp->link_params.bp = bp;
7266 bp->link_params.port = port;
7268 bp->link_params.serdes_config =
7269 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7270 bp->link_params.lane_config =
7271 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7272 bp->link_params.ext_phy_config =
7274 dev_info.port_hw_config[port].external_phy_config);
7275 bp->link_params.speed_cap_mask =
7277 dev_info.port_hw_config[port].speed_capability_mask);
7279 bp->port.link_config =
7280 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7282 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7283 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7284 " link_config 0x%08x\n",
7285 bp->link_params.serdes_config,
7286 bp->link_params.lane_config,
7287 bp->link_params.ext_phy_config,
7288 bp->link_params.speed_cap_mask, bp->port.link_config);
7290 bp->link_params.switch_cfg = (bp->port.link_config &
7291 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7292 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7294 bnx2x_link_settings_requested(bp);
7296 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7297 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
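/* The shmem stores the MAC in two words: mac_upper holds bytes 0-1
 * in its low 16 bits and mac_lower holds bytes 2-5.  E.g. a
 * hypothetical address 00:10:18:aa:bb:cc is stored as
 * val2 = 0x00000010 and val = 0x18aabbcc.
 */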
7298 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7299 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7300 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7301 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7302 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7303 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7304 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7305 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7308 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7310 int func = BP_FUNC(bp);
7314 bnx2x_get_common_hwinfo(bp);
7318 if (CHIP_IS_E1H(bp)) {
7320 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7323 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7324 FUNC_MF_CFG_E1HOV_TAG_MASK);
7325 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7329 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7331 func, bp->e1hov, bp->e1hov);
7333 BNX2X_DEV_INFO("Single function mode\n");
7335 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7336 " aborting\n", func);
7342 if (!BP_NOMCP(bp)) {
7343 bnx2x_get_port_hwinfo(bp);
7345 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7346 DRV_MSG_SEQ_NUMBER_MASK);
7347 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7351 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7352 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7353 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7354 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7355 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7356 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7357 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7358 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7359 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7360 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7361 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7363 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7371 /* only supposed to happen on emulation/FPGA */
7372 BNX2X_ERR("warning: random MAC workaround active\n");
7373 random_ether_addr(bp->dev->dev_addr);
7374 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7380 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7382 int func = BP_FUNC(bp);
7385 mutex_init(&bp->port.phy_mutex);
7387 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7388 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7390 rc = bnx2x_get_hwinfo(bp);
7392 /* need to reset chip if undi was active */
7394 bnx2x_undi_unload(bp);
7396 if (CHIP_REV_IS_FPGA(bp))
7397 printk(KERN_ERR PFX "FPGA detected\n");
7399 if (BP_NOMCP(bp) && (func == 0))
7401 "MCP disabled, must load devices in order!\n");
7405 bp->flags &= ~TPA_ENABLE_FLAG;
7406 bp->dev->features &= ~NETIF_F_LRO;
7408 bp->flags |= TPA_ENABLE_FLAG;
7409 bp->dev->features |= NETIF_F_LRO;
7413 bp->tx_ring_size = MAX_TX_AVAIL;
7414 bp->rx_ring_size = MAX_RX_AVAIL;
7422 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7423 bp->current_interval = (poll ? poll : bp->timer_interval);
7425 init_timer(&bp->timer);
7426 bp->timer.expires = jiffies + bp->current_interval;
7427 bp->timer.data = (unsigned long) bp;
7428 bp->timer.function = bnx2x_timer;
7434 * ethtool service functions
7437 /* All ethtool functions called with rtnl_lock */
7439 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7441 struct bnx2x *bp = netdev_priv(dev);
7443 cmd->supported = bp->port.supported;
7444 cmd->advertising = bp->port.advertising;
7446 if (netif_carrier_ok(dev)) {
7447 cmd->speed = bp->link_vars.line_speed;
7448 cmd->duplex = bp->link_vars.duplex;
7450 cmd->speed = bp->link_params.req_line_speed;
7451 cmd->duplex = bp->link_params.req_duplex;
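/* in multi-function mode the per-function bandwidth is configured in
 * units of 100Mbps, so a MAX_BW value of 25, for example, caps the
 * reported speed at 2500 (2.5Gbps) */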
7456 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7457 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7458 if (vn_max_rate < cmd->speed)
7459 cmd->speed = vn_max_rate;
7462 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7464 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7466 switch (ext_phy_type) {
7467 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7472 cmd->port = PORT_FIBRE;
7475 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7476 cmd->port = PORT_TP;
7479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7480 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7481 bp->link_params.ext_phy_config);
7485 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7486 bp->link_params.ext_phy_config);
7490 cmd->port = PORT_TP;
7492 cmd->phy_address = bp->port.phy_addr;
7493 cmd->transceiver = XCVR_INTERNAL;
7495 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7496 cmd->autoneg = AUTONEG_ENABLE;
7498 cmd->autoneg = AUTONEG_DISABLE;
7503 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7504 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7505 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7506 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7507 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7508 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7509 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7514 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7516 struct bnx2x *bp = netdev_priv(dev);
7522 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7523 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7524 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7525 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7526 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7527 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7528 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7530 if (cmd->autoneg == AUTONEG_ENABLE) {
7531 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7532 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7536 /* advertise the requested speed and duplex if supported */
7537 cmd->advertising &= bp->port.supported;
7539 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7540 bp->link_params.req_duplex = DUPLEX_FULL;
7541 bp->port.advertising |= (ADVERTISED_Autoneg |
7544 } else { /* forced speed */
7545 /* advertise the requested speed and duplex if supported */
7546 switch (cmd->speed) {
7548 if (cmd->duplex == DUPLEX_FULL) {
7549 if (!(bp->port.supported &
7550 SUPPORTED_10baseT_Full)) {
7552 "10M full not supported\n");
7556 advertising = (ADVERTISED_10baseT_Full |
7559 if (!(bp->port.supported &
7560 SUPPORTED_10baseT_Half)) {
7562 "10M half not supported\n");
7566 advertising = (ADVERTISED_10baseT_Half |
7572 if (cmd->duplex == DUPLEX_FULL) {
7573 if (!(bp->port.supported &
7574 SUPPORTED_100baseT_Full)) {
7576 "100M full not supported\n");
7580 advertising = (ADVERTISED_100baseT_Full |
7583 if (!(bp->port.supported &
7584 SUPPORTED_100baseT_Half)) {
7586 "100M half not supported\n");
7590 advertising = (ADVERTISED_100baseT_Half |
7596 if (cmd->duplex != DUPLEX_FULL) {
7597 DP(NETIF_MSG_LINK, "1G half not supported\n");
7601 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7602 DP(NETIF_MSG_LINK, "1G full not supported\n");
7606 advertising = (ADVERTISED_1000baseT_Full |
7611 if (cmd->duplex != DUPLEX_FULL) {
7613 "2.5G half not supported\n");
7617 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7619 "2.5G full not supported\n");
7623 advertising = (ADVERTISED_2500baseX_Full |
7628 if (cmd->duplex != DUPLEX_FULL) {
7629 DP(NETIF_MSG_LINK, "10G half not supported\n");
7633 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7634 DP(NETIF_MSG_LINK, "10G full not supported\n");
7638 advertising = (ADVERTISED_10000baseT_Full |
7643 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7647 bp->link_params.req_line_speed = cmd->speed;
7648 bp->link_params.req_duplex = cmd->duplex;
7649 bp->port.advertising = advertising;
7652 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7653 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7654 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7655 bp->port.advertising);
7657 if (netif_running(dev)) {
7658 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7665 #define PHY_FW_VER_LEN 10
7667 static void bnx2x_get_drvinfo(struct net_device *dev,
7668 struct ethtool_drvinfo *info)
7670 struct bnx2x *bp = netdev_priv(dev);
7671 char phy_fw_ver[PHY_FW_VER_LEN];
7673 strcpy(info->driver, DRV_MODULE_NAME);
7674 strcpy(info->version, DRV_MODULE_VERSION);
7676 phy_fw_ver[0] = '\0';
7678 bnx2x_phy_hw_lock(bp);
7679 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7680 (bp->state != BNX2X_STATE_CLOSED),
7681 phy_fw_ver, PHY_FW_VER_LEN);
7682 bnx2x_phy_hw_unlock(bp);
7685 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7686 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7687 BCM_5710_FW_REVISION_VERSION,
7688 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7689 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7690 strcpy(info->bus_info, pci_name(bp->pdev));
7691 info->n_stats = BNX2X_NUM_STATS;
7692 info->testinfo_len = BNX2X_NUM_TESTS;
7693 info->eedump_len = bp->common.flash_size;
7694 info->regdump_len = 0;
7697 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7699 struct bnx2x *bp = netdev_priv(dev);
7701 if (bp->flags & NO_WOL_FLAG) {
7705 wol->supported = WAKE_MAGIC;
7707 wol->wolopts = WAKE_MAGIC;
7711 memset(&wol->sopass, 0, sizeof(wol->sopass));
7714 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7716 struct bnx2x *bp = netdev_priv(dev);
7718 if (wol->wolopts & ~WAKE_MAGIC)
7721 if (wol->wolopts & WAKE_MAGIC) {
7722 if (bp->flags & NO_WOL_FLAG)
7732 static u32 bnx2x_get_msglevel(struct net_device *dev)
7734 struct bnx2x *bp = netdev_priv(dev);
7736 return bp->msglevel;
7739 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7741 struct bnx2x *bp = netdev_priv(dev);
7743 if (capable(CAP_NET_ADMIN))
7744 bp->msglevel = level;
7747 static int bnx2x_nway_reset(struct net_device *dev)
7749 struct bnx2x *bp = netdev_priv(dev);
7754 if (netif_running(dev)) {
7755 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7762 static int bnx2x_get_eeprom_len(struct net_device *dev)
7764 struct bnx2x *bp = netdev_priv(dev);
7766 return bp->common.flash_size;
7769 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7771 int port = BP_PORT(bp);
7775 /* adjust timeout for emulation/FPGA */
7776 count = NVRAM_TIMEOUT_COUNT;
7777 if (CHIP_REV_IS_SLOW(bp))
7780 /* request access to nvram interface */
7781 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7782 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
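/* access is granted by a hardware arbiter: after setting the
 * per-port request bit above, poll until the matching grant bit
 * appears or the timeout expires */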
7784 for (i = 0; i < count*10; i++) {
7785 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7786 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7792 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7793 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7800 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7802 int port = BP_PORT(bp);
7806 /* adjust timeout for emulation/FPGA */
7807 count = NVRAM_TIMEOUT_COUNT;
7808 if (CHIP_REV_IS_SLOW(bp))
7811 /* relinquish nvram interface */
7812 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7813 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7815 for (i = 0; i < count*10; i++) {
7816 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7817 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7823 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7824 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7831 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7835 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7837 /* enable both bits, even on read */
7838 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7839 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7840 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7843 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7847 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7849 /* disable both bits, even after read */
7850 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7851 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7852 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7855 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7861 /* build the command word */
7862 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7864 /* need to clear DONE bit separately */
7865 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7867 /* address of the NVRAM to read from */
7868 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7869 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7871 /* issue a read command */
7872 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7874 /* adjust timeout for emulation/FPGA */
7875 count = NVRAM_TIMEOUT_COUNT;
7876 if (CHIP_REV_IS_SLOW(bp))
7879 /* wait for completion */
7882 for (i = 0; i < count; i++) {
7884 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7886 if (val & MCPR_NVM_COMMAND_DONE) {
7887 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7888 /* we read nvram data in cpu order
7889 * but ethtool sees it as an array of bytes;
7890 * converting to big-endian does the work */
7891 val = cpu_to_be32(val);
7901 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7908 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7910 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7915 if (offset + buf_size > bp->common.flash_size) {
7916 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7917 " buf_size (0x%x) > flash_size (0x%x)\n",
7918 offset, buf_size, bp->common.flash_size);
7922 /* request access to nvram interface */
7923 rc = bnx2x_acquire_nvram_lock(bp);
7927 /* enable access to nvram interface */
7928 bnx2x_enable_nvram_access(bp);
7930 /* read the first word(s) */
7931 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7932 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7933 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7934 memcpy(ret_buf, &val, 4);
7936 /* advance to the next dword */
7937 offset += sizeof(u32);
7938 ret_buf += sizeof(u32);
7939 buf_size -= sizeof(u32);
7944 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7945 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7946 memcpy(ret_buf, &val, 4);
7949 /* disable access to nvram interface */
7950 bnx2x_disable_nvram_access(bp);
7951 bnx2x_release_nvram_lock(bp);
7956 static int bnx2x_get_eeprom(struct net_device *dev,
7957 struct ethtool_eeprom *eeprom, u8 *eebuf)
7959 struct bnx2x *bp = netdev_priv(dev);
7962 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7963 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7964 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7965 eeprom->len, eeprom->len);
7967 /* parameters already validated in ethtool_get_eeprom */
7969 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7974 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7979 /* build the command word */
7980 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7982 /* need to clear DONE bit separately */
7983 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7985 /* write the data */
7986 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7988 /* address of the NVRAM to write to */
7989 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7990 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7992 /* issue the write command */
7993 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7995 /* adjust timeout for emulation/FPGA */
7996 count = NVRAM_TIMEOUT_COUNT;
7997 if (CHIP_REV_IS_SLOW(bp))
8000 /* wait for completion */
8002 for (i = 0; i < count; i++) {
8004 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8005 if (val & MCPR_NVM_COMMAND_DONE) {
8014 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
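/* BYTE_OFFSET() turns a byte offset within a dword into a bit shift:
 * offset 0 -> 0, 1 -> 8, 2 -> 16, 3 -> 24 */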
8016 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8024 if (offset + buf_size > bp->common.flash_size) {
8025 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8026 " buf_size (0x%x) > flash_size (0x%x)\n",
8027 offset, buf_size, bp->common.flash_size);
8031 /* request access to nvram interface */
8032 rc = bnx2x_acquire_nvram_lock(bp);
8036 /* enable access to nvram interface */
8037 bnx2x_enable_nvram_access(bp);
8039 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8040 align_offset = (offset & ~0x03);
8041 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8044 val &= ~(0xff << BYTE_OFFSET(offset));
8045 val |= (*data_buf << BYTE_OFFSET(offset));
8047 /* nvram data is returned as an array of bytes
8048 * convert it back to cpu order */
8049 val = be32_to_cpu(val);
8051 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8055 /* disable access to nvram interface */
8056 bnx2x_disable_nvram_access(bp);
8057 bnx2x_release_nvram_lock(bp);
8062 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8070 if (buf_size == 1) /* ethtool */
8071 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8073 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8075 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8080 if (offset + buf_size > bp->common.flash_size) {
8081 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8082 " buf_size (0x%x) > flash_size (0x%x)\n",
8083 offset, buf_size, bp->common.flash_size);
8087 /* request access to nvram interface */
8088 rc = bnx2x_acquire_nvram_lock(bp);
8092 /* enable access to nvram interface */
8093 bnx2x_enable_nvram_access(bp);
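/* Multi-dword writes are streamed dword by dword.  The NVRAM expects
 * a FIRST flag on the first dword of each page and a LAST flag on
 * the last dword of each page (and of the whole buffer), so the loop
 * below raises these flags whenever it crosses a NVRAM_PAGE_SIZE
 * boundary.
 */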
8096 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8097 while ((written_so_far < buf_size) && (rc == 0)) {
8098 if (written_so_far == (buf_size - sizeof(u32)))
8099 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8100 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8101 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8102 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8103 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8105 memcpy(&val, data_buf, 4);
8107 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8109 /* advance to the next dword */
8110 offset += sizeof(u32);
8111 data_buf += sizeof(u32);
8112 written_so_far += sizeof(u32);
8116 /* disable access to nvram interface */
8117 bnx2x_disable_nvram_access(bp);
8118 bnx2x_release_nvram_lock(bp);
8123 static int bnx2x_set_eeprom(struct net_device *dev,
8124 struct ethtool_eeprom *eeprom, u8 *eebuf)
8126 struct bnx2x *bp = netdev_priv(dev);
8129 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8130 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8131 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8132 eeprom->len, eeprom->len);
8134 /* parameters already validated in ethtool_set_eeprom */
8136 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8137 if (eeprom->magic == 0x00504859)
8140 bnx2x_phy_hw_lock(bp);
8141 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8142 bp->link_params.ext_phy_config,
8143 (bp->state != BNX2X_STATE_CLOSED),
8144 eebuf, eeprom->len);
8145 if ((bp->state == BNX2X_STATE_OPEN) ||
8146 (bp->state == BNX2X_STATE_DISABLED)) {
8147 rc |= bnx2x_link_reset(&bp->link_params,
8149 rc |= bnx2x_phy_init(&bp->link_params,
8152 bnx2x_phy_hw_unlock(bp);
8154 } else /* Only the PMF can access the PHY */
8157 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8162 static int bnx2x_get_coalesce(struct net_device *dev,
8163 struct ethtool_coalesce *coal)
8165 struct bnx2x *bp = netdev_priv(dev);
8167 memset(coal, 0, sizeof(struct ethtool_coalesce));
8169 coal->rx_coalesce_usecs = bp->rx_ticks;
8170 coal->tx_coalesce_usecs = bp->tx_ticks;
8175 static int bnx2x_set_coalesce(struct net_device *dev,
8176 struct ethtool_coalesce *coal)
8178 struct bnx2x *bp = netdev_priv(dev);
8180 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8181 if (bp->rx_ticks > 3000)
8182 bp->rx_ticks = 3000;
8184 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8185 if (bp->tx_ticks > 0x3000)
8186 bp->tx_ticks = 0x3000;
8188 if (netif_running(dev))
8189 bnx2x_update_coalesce(bp);
8194 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8196 struct bnx2x *bp = netdev_priv(dev);
8200 if (data & ETH_FLAG_LRO) {
8201 if (!(dev->features & NETIF_F_LRO)) {
8202 dev->features |= NETIF_F_LRO;
8203 bp->flags |= TPA_ENABLE_FLAG;
8207 } else if (dev->features & NETIF_F_LRO) {
8208 dev->features &= ~NETIF_F_LRO;
8209 bp->flags &= ~TPA_ENABLE_FLAG;
8213 if (changed && netif_running(dev)) {
8214 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8215 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8221 static void bnx2x_get_ringparam(struct net_device *dev,
8222 struct ethtool_ringparam *ering)
8224 struct bnx2x *bp = netdev_priv(dev);
8226 ering->rx_max_pending = MAX_RX_AVAIL;
8227 ering->rx_mini_max_pending = 0;
8228 ering->rx_jumbo_max_pending = 0;
8230 ering->rx_pending = bp->rx_ring_size;
8231 ering->rx_mini_pending = 0;
8232 ering->rx_jumbo_pending = 0;
8234 ering->tx_max_pending = MAX_TX_AVAIL;
8235 ering->tx_pending = bp->tx_ring_size;
8238 static int bnx2x_set_ringparam(struct net_device *dev,
8239 struct ethtool_ringparam *ering)
8241 struct bnx2x *bp = netdev_priv(dev);
8244 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8245 (ering->tx_pending > MAX_TX_AVAIL) ||
8246 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8249 bp->rx_ring_size = ering->rx_pending;
8250 bp->tx_ring_size = ering->tx_pending;
8252 if (netif_running(dev)) {
8253 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8254 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8260 static void bnx2x_get_pauseparam(struct net_device *dev,
8261 struct ethtool_pauseparam *epause)
8263 struct bnx2x *bp = netdev_priv(dev);
8265 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8266 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8268 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8270 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8273 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8274 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8275 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8278 static int bnx2x_set_pauseparam(struct net_device *dev,
8279 struct ethtool_pauseparam *epause)
8281 struct bnx2x *bp = netdev_priv(dev);
8286 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8287 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8288 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8290 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8292 if (epause->rx_pause)
8293 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8295 if (epause->tx_pause)
8296 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8298 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8299 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
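/* FLOW_CTRL_AUTO doubles as "nothing requested yet": if neither rx
 * nor tx pause was set above, replace it with an explicit
 * FLOW_CTRL_NONE (autoneg may restore AUTO below) */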
8301 if (epause->autoneg) {
8302 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8303 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8307 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8308 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8312 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8314 if (netif_running(dev)) {
8315 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8322 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8324 struct bnx2x *bp = netdev_priv(dev);
8329 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8331 struct bnx2x *bp = netdev_priv(dev);
8337 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8340 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8341 dev->features |= NETIF_F_TSO6;
8343 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8344 dev->features &= ~NETIF_F_TSO6;
8350 static const struct {
8351 char string[ETH_GSTRING_LEN];
8352 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8353 { "register_test (offline)" },
8354 { "memory_test (offline)" },
8355 { "loopback_test (offline)" },
8356 { "nvram_test (online)" },
8357 { "interrupt_test (online)" },
8358 { "link_test (online)" },
8359 { "idle check (online)" },
8360 { "MC errors (online)" }
8363 static int bnx2x_self_test_count(struct net_device *dev)
8365 return BNX2X_NUM_TESTS;
8368 static int bnx2x_test_registers(struct bnx2x *bp)
8370 int idx, i, rc = -ENODEV;
8372 static const struct {
8377 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8378 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8379 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8380 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8381 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8382 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8383 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8384 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8385 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8386 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8387 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8388 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8389 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8390 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8391 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8392 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8393 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8394 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8395 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8396 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8397 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8398 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8399 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8400 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8401 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8402 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8403 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8404 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8405 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8406 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8407 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8408 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8409 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8410 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8411 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8412 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8413 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8414 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8416 { 0xffffffff, 0, 0x00000000 }
8419 if (!netif_running(bp->dev))
8422 /* Run the test twice:
8423 first by writing 0x00000000, then by writing 0xffffffff */
8424 for (idx = 0; idx < 2; idx++) {
8431 wr_val = 0xffffffff;
8435 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8436 u32 offset, mask, save_val, val;
8437 int port = BP_PORT(bp);
8439 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8440 mask = reg_tbl[i].mask;
8442 save_val = REG_RD(bp, offset);
8444 REG_WR(bp, offset, wr_val);
8445 val = REG_RD(bp, offset);
8447 /* Restore the original register's value */
8448 REG_WR(bp, offset, save_val);
8450 /* verify the value matches the expected one */
8451 if ((val & mask) != (wr_val & mask))
8462 static int bnx2x_test_memory(struct bnx2x *bp)
8464 int i, j, rc = -ENODEV;
8466 static const struct {
8470 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8471 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8472 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8473 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8474 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8475 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8476 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8480 static const struct {
8485 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8486 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8487 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8488 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8489 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8490 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8492 { NULL, 0xffffffff, 0 }
8495 if (!netif_running(bp->dev))
8498 /* Go through all the memories */
8499 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8500 for (j = 0; j < mem_tbl[i].size; j++)
8501 REG_RD(bp, mem_tbl[i].offset + j*4);
8503 /* Check the parity status */
8504 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8505 val = REG_RD(bp, prty_tbl[i].offset);
8506 if (val & ~(prty_tbl[i].mask)) {
8508 "%s is 0x%x\n", prty_tbl[i].name, val);
8519 static void bnx2x_netif_start(struct bnx2x *bp)
8523 if (atomic_dec_and_test(&bp->intr_sem)) {
8524 if (netif_running(bp->dev)) {
8525 bnx2x_int_enable(bp);
8526 for_each_queue(bp, i)
8527 napi_enable(&bnx2x_fp(bp, i, napi));
8528 if (bp->state == BNX2X_STATE_OPEN)
8529 netif_wake_queue(bp->dev);
8534 static void bnx2x_netif_stop(struct bnx2x *bp)
8538 if (netif_running(bp->dev)) {
8539 netif_tx_disable(bp->dev);
8540 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8541 for_each_queue(bp, i)
8542 napi_disable(&bnx2x_fp(bp, i, napi));
8544 bnx2x_int_disable_sync(bp);
8547 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8552 while (bnx2x_link_test(bp) && cnt--)
8556 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8558 unsigned int pkt_size, num_pkts, i;
8559 struct sk_buff *skb;
8560 unsigned char *packet;
8561 struct bnx2x_fastpath *fp = &bp->fp[0];
8562 u16 tx_start_idx, tx_idx;
8563 u16 rx_start_idx, rx_idx;
8565 struct sw_tx_bd *tx_buf;
8566 struct eth_tx_bd *tx_bd;
8568 union eth_rx_cqe *cqe;
8570 struct sw_rx_bd *rx_buf;
8574 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8575 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8576 bnx2x_phy_hw_lock(bp);
8577 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8578 bnx2x_phy_hw_unlock(bp);
8580 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8581 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8582 bnx2x_phy_hw_lock(bp);
8583 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8584 bnx2x_phy_hw_unlock(bp);
8585 /* wait until link state is restored */
8586 bnx2x_wait_for_link(bp, link_up);
8592 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8595 goto test_loopback_exit;
8597 packet = skb_put(skb, pkt_size);
8598 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8599 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8600 for (i = ETH_HLEN; i < pkt_size; i++)
8601 packet[i] = (unsigned char) (i & 0xff);
8604 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8605 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8607 pkt_prod = fp->tx_pkt_prod++;
8608 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8609 tx_buf->first_bd = fp->tx_bd_prod;
8612 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8613 mapping = pci_map_single(bp->pdev, skb->data,
8614 skb_headlen(skb), PCI_DMA_TODEVICE);
8615 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8616 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8617 tx_bd->nbd = cpu_to_le16(1);
8618 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8619 tx_bd->vlan = cpu_to_le16(pkt_prod);
8620 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8621 ETH_TX_BD_FLAGS_END_BD);
8622 tx_bd->general_data = ((UNICAST_ADDRESS <<
8623 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8625 fp->hw_tx_prods->bds_prod =
8626 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8627 mb(); /* FW restriction: must not reorder writing nbd and packets */
8628 fp->hw_tx_prods->packets_prod =
8629 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8630 DOORBELL(bp, FP_IDX(fp), 0);
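/* This is a minimal single-BD transmit: one buffer descriptor covers
 * the whole frame (nbd = 1, START and END flags set), the BD and
 * packet producers are advanced and the doorbell tells the chip to
 * fetch it.  The memory barrier above matters - the firmware must
 * not observe the new packets_prod before the new bds_prod.
 */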
8636 bp->dev->trans_start = jiffies;
8640 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8641 if (tx_idx != tx_start_idx + num_pkts)
8642 goto test_loopback_exit;
8644 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8645 if (rx_idx != rx_start_idx + num_pkts)
8646 goto test_loopback_exit;
8648 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8649 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8650 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8651 goto test_loopback_rx_exit;
8653 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8654 if (len != pkt_size)
8655 goto test_loopback_rx_exit;
8657 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8659 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8660 for (i = ETH_HLEN; i < pkt_size; i++)
8661 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8662 goto test_loopback_rx_exit;
8666 test_loopback_rx_exit:
8667 bp->dev->last_rx = jiffies;
8669 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8670 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8671 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8672 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8674 /* Update producers */
8675 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8677 mmiowb(); /* keep prod updates ordered */
8680 bp->link_params.loopback_mode = LOOPBACK_NONE;
8685 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8689 if (!netif_running(bp->dev))
8690 return BNX2X_LOOPBACK_FAILED;
8692 bnx2x_netif_stop(bp);
8694 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8695 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8696 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8699 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8700 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8701 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8704 bnx2x_netif_start(bp);
8709 #define CRC32_RESIDUAL 0xdebb20e3
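/* Each NVRAM area checked below is stored with its CRC32 appended;
 * running the CRC over the data including the stored checksum yields
 * this well-known constant residue when the data is intact.
 */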
8711 static int bnx2x_test_nvram(struct bnx2x *bp)
8713 static const struct {
8717 { 0, 0x14 }, /* bootstrap */
8718 { 0x14, 0xec }, /* dir */
8719 { 0x100, 0x350 }, /* manuf_info */
8720 { 0x450, 0xf0 }, /* feature_info */
8721 { 0x640, 0x64 }, /* upgrade_key_info */
8723 { 0x708, 0x70 }, /* manuf_key_info */
8728 u8 *data = (u8 *)buf;
8732 rc = bnx2x_nvram_read(bp, 0, data, 4);
8734 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8735 goto test_nvram_exit;
8738 magic = be32_to_cpu(buf[0]);
8739 if (magic != 0x669955aa) {
8740 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8742 goto test_nvram_exit;
8745 for (i = 0; nvram_tbl[i].size; i++) {
8747 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8751 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8752 goto test_nvram_exit;
8755 csum = ether_crc_le(nvram_tbl[i].size, data);
8756 if (csum != CRC32_RESIDUAL) {
8758 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8760 goto test_nvram_exit;
8768 static int bnx2x_test_intr(struct bnx2x *bp)
8770 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8773 if (!netif_running(bp->dev))
8776 config->hdr.length_6b = 0;
8777 config->hdr.offset = 0;
8778 config->hdr.client_id = BP_CL_ID(bp);
8779 config->hdr.reserved1 = 0;
8781 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8782 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8783 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8785 bp->set_mac_pending++;
8786 for (i = 0; i < 10; i++) {
8787 if (!bp->set_mac_pending)
8789 msleep_interruptible(10);
8798 static void bnx2x_self_test(struct net_device *dev,
8799 struct ethtool_test *etest, u64 *buf)
8801 struct bnx2x *bp = netdev_priv(dev);
8803 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8805 if (!netif_running(dev))
8808 /* offline tests are not supported in MF mode */
8810 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8812 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8815 link_up = bp->link_vars.link_up;
8816 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8817 bnx2x_nic_load(bp, LOAD_DIAG);
8818 /* wait until link state is restored */
8819 bnx2x_wait_for_link(bp, link_up);
8821 if (bnx2x_test_registers(bp) != 0) {
8823 etest->flags |= ETH_TEST_FL_FAILED;
8825 if (bnx2x_test_memory(bp) != 0) {
8827 etest->flags |= ETH_TEST_FL_FAILED;
8829 buf[2] = bnx2x_test_loopback(bp, link_up);
8831 etest->flags |= ETH_TEST_FL_FAILED;
8833 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8834 bnx2x_nic_load(bp, LOAD_NORMAL);
8835 /* wait until link state is restored */
8836 bnx2x_wait_for_link(bp, link_up);
8838 if (bnx2x_test_nvram(bp) != 0) {
8840 etest->flags |= ETH_TEST_FL_FAILED;
8842 if (bnx2x_test_intr(bp) != 0) {
8844 etest->flags |= ETH_TEST_FL_FAILED;
8847 if (bnx2x_link_test(bp) != 0) {
8849 etest->flags |= ETH_TEST_FL_FAILED;
8851 buf[7] = bnx2x_mc_assert(bp);
8853 etest->flags |= ETH_TEST_FL_FAILED;
8855 #ifdef BNX2X_EXTRA_DEBUG
8856 bnx2x_panic_dump(bp);
8860 static const struct {
8864 #define STATS_FLAGS_PORT 1
8865 #define STATS_FLAGS_FUNC 2
8866 u8 string[ETH_GSTRING_LEN];
8867 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8868 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8869 8, STATS_FLAGS_FUNC, "rx_bytes" },
8870 { STATS_OFFSET32(error_bytes_received_hi),
8871 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8872 { STATS_OFFSET32(total_bytes_transmitted_hi),
8873 8, STATS_FLAGS_FUNC, "tx_bytes" },
8874 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8875 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8876 { STATS_OFFSET32(total_unicast_packets_received_hi),
8877 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8878 { STATS_OFFSET32(total_multicast_packets_received_hi),
8879 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8880 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8881 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8882 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8883 8, STATS_FLAGS_FUNC, "tx_packets" },
8884 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8885 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8886 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8887 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8888 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8889 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8890 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8891 8, STATS_FLAGS_PORT, "rx_align_errors" },
8892 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8893 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8894 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8895 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8896 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8897 8, STATS_FLAGS_PORT, "tx_deferred" },
8898 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8899 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8900 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8901 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8902 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8903 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8904 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8905 8, STATS_FLAGS_PORT, "rx_fragments" },
8906 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8907 8, STATS_FLAGS_PORT, "rx_jabbers" },
8908 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8909 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8910 { STATS_OFFSET32(jabber_packets_received),
8911 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8912 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8913 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8914 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8915 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8916 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8917 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8918 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8919 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8920 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8921 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8922 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8923 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8924 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8925 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8926 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8927 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8928 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8929 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8930 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8931 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8932 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8933 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8934 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8935 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8936 { STATS_OFFSET32(mac_filter_discard),
8937 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8938 { STATS_OFFSET32(no_buff_discard),
8939 4, STATS_FLAGS_FUNC, "rx_discards" },
8940 { STATS_OFFSET32(xxoverflow_discard),
8941 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8942 { STATS_OFFSET32(brb_drop_hi),
8943 8, STATS_FLAGS_PORT, "brb_discard" },
8944 { STATS_OFFSET32(brb_truncate_hi),
8945 8, STATS_FLAGS_PORT, "brb_truncate" },
8946 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8947 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8948 { STATS_OFFSET32(rx_skb_alloc_failed),
8949 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8950 /* 42 */{ STATS_OFFSET32(hw_csum_err),
8951 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8954 #define IS_NOT_E1HMF_STAT(bp, i) \
8955 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
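/* Usage sketch for the macro above: on an E1H part running in
 * multi-function mode the per-port MAC counters are not meaningful for
 * a single function, so every stats accessor below walks the array and
 * skips the STATS_FLAGS_PORT entries, e.g.:
 *
 *	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
 *		if (IS_NOT_E1HMF_STAT(bp, i))
 *			continue;
 *		... emit entry i as output slot j++ ...
 *	}
 */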
8957 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8959 struct bnx2x *bp = netdev_priv(dev);
8962 switch (stringset) {
8964 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8965 if (IS_NOT_E1HMF_STAT(bp, i))
8967 strcpy(buf + j*ETH_GSTRING_LEN,
8968 bnx2x_stats_arr[i].string);
8974 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8979 static int bnx2x_get_stats_count(struct net_device *dev)
8981 struct bnx2x *bp = netdev_priv(dev);
8982 int i, num_stats = 0;
8984 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8985 if (IS_NOT_E1HMF_STAT(bp, i))
8992 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8993 struct ethtool_stats *stats, u64 *buf)
8995 struct bnx2x *bp = netdev_priv(dev);
8996 u32 *hw_stats = (u32 *)&bp->eth_stats;
8999 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9000 if (IS_NOT_E1HMF_STAT(bp, i))
9003 if (bnx2x_stats_arr[i].size == 0) {
9004 /* skip this counter */
9009 if (bnx2x_stats_arr[i].size == 4) {
9010 /* 4-byte counter */
9011 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9015 /* 8-byte counter */
9016 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9017 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
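/* The 8-byte counters above are kept as two consecutive 32-bit words,
 * most significant word first; HILO_U64() is assumed here to combine
 * them as (((u64)hi << 32) + lo).  An equivalent open-coded helper,
 * for illustration only:
 */
#if 0	/* illustrative only */
static inline u64 bnx2x_stat64(const u32 *hw_stats, int off)
{
	/* hi word at 'off', lo word right after it */
	return (((u64)hw_stats[off]) << 32) + hw_stats[off + 1];
}
#endif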
9022 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9024 struct bnx2x *bp = netdev_priv(dev);
9025 int port = BP_PORT(bp);
9028 if (!netif_running(dev))
9037 for (i = 0; i < (data * 2); i++) {
9039 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9040 bp->link_params.hw_led_mode,
9041 bp->link_params.chip_id);
9043 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9044 bp->link_params.hw_led_mode,
9045 bp->link_params.chip_id);
9047 msleep_interruptible(500);
9048 if (signal_pending(current))
9052 if (bp->link_vars.link_up)
9053 bnx2x_set_led(bp, port, LED_MODE_OPER,
9054 bp->link_vars.line_speed,
9055 bp->link_params.hw_led_mode,
9056 bp->link_params.chip_id);
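/* Reached from userspace via "ethtool -p <dev> <N>": the loop above
 * toggles the LED every half second, hence data * 2 iterations for N
 * seconds of blinking, and the final call restores the LED to match
 * the current link state. */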
9061 static struct ethtool_ops bnx2x_ethtool_ops = {
9062 .get_settings = bnx2x_get_settings,
9063 .set_settings = bnx2x_set_settings,
9064 .get_drvinfo = bnx2x_get_drvinfo,
9065 .get_wol = bnx2x_get_wol,
9066 .set_wol = bnx2x_set_wol,
9067 .get_msglevel = bnx2x_get_msglevel,
9068 .set_msglevel = bnx2x_set_msglevel,
9069 .nway_reset = bnx2x_nway_reset,
9070 .get_link = ethtool_op_get_link,
9071 .get_eeprom_len = bnx2x_get_eeprom_len,
9072 .get_eeprom = bnx2x_get_eeprom,
9073 .set_eeprom = bnx2x_set_eeprom,
9074 .get_coalesce = bnx2x_get_coalesce,
9075 .set_coalesce = bnx2x_set_coalesce,
9076 .get_ringparam = bnx2x_get_ringparam,
9077 .set_ringparam = bnx2x_set_ringparam,
9078 .get_pauseparam = bnx2x_get_pauseparam,
9079 .set_pauseparam = bnx2x_set_pauseparam,
9080 .get_rx_csum = bnx2x_get_rx_csum,
9081 .set_rx_csum = bnx2x_set_rx_csum,
9082 .get_tx_csum = ethtool_op_get_tx_csum,
9083 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9084 .set_flags = bnx2x_set_flags,
9085 .get_flags = ethtool_op_get_flags,
9086 .get_sg = ethtool_op_get_sg,
9087 .set_sg = ethtool_op_set_sg,
9088 .get_tso = ethtool_op_get_tso,
9089 .set_tso = bnx2x_set_tso,
9090 .self_test_count = bnx2x_self_test_count,
9091 .self_test = bnx2x_self_test,
9092 .get_strings = bnx2x_get_strings,
9093 .phys_id = bnx2x_phys_id,
9094 .get_stats_count = bnx2x_get_stats_count,
9095 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9098 /* end of ethtool_ops */
9100 /****************************************************************************
9101 * General service functions
9102 ****************************************************************************/
9104 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9108 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9112 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9113 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9114 PCI_PM_CTRL_PME_STATUS));
9116 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9117 /* delay required during transition out of D3hot */
9122 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9126 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9128 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9131 /* No more memory access after this point until
9132 * device is brought back to D0.
9143 * net_device service functions
9146 static int bnx2x_poll(struct napi_struct *napi, int budget)
9148 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9150 struct bnx2x *bp = fp->bp;
9153 #ifdef BNX2X_STOP_ON_ERROR
9154 if (unlikely(bp->panic))
9158 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9159 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9160 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9162 bnx2x_update_fpsb_idx(fp);
9164 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
9165 (fp->tx_pkt_prod != fp->tx_pkt_cons))
9166 bnx2x_tx_int(fp, budget);
9168 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9169 work_done = bnx2x_rx_int(fp, budget);
9171 rmb(); /* bnx2x_has_work() reads the status block */
9173 /* must not complete if we consumed full budget */
9174 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9176 #ifdef BNX2X_STOP_ON_ERROR
9179 netif_rx_complete(bp->dev, napi);
9181 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9182 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9183 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9184 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9190 /* we split the first BD into headers and data BDs
9191 * to ease the pain of our fellow microcode engineers
9192 * we use one mapping for both BDs
9193 * So far this has only been observed to happen
9194 * in Other Operating Systems(TM)
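 *
 * The resulting BD chain for the split case looks like this (sketch;
 * PBD placement assumed from the "after the pbd" note below):
 *
 *   [start BD]  hlen bytes of headers, keeps the original DMA mapping
 *   [parse BD]
 *   [data BD]   mapping + hlen, old_len - hlen bytes, SW_LSO flag,
 *               no mapping of its own
 *   [frag BDs ...]
 */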
9196 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9197 struct bnx2x_fastpath *fp,
9198 struct eth_tx_bd **tx_bd, u16 hlen,
9199 u16 bd_prod, int nbd)
9201 struct eth_tx_bd *h_tx_bd = *tx_bd;
9202 struct eth_tx_bd *d_tx_bd;
9204 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9206 /* first fix first BD */
9207 h_tx_bd->nbd = cpu_to_le16(nbd);
9208 h_tx_bd->nbytes = cpu_to_le16(hlen);
9210 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9211 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9212 h_tx_bd->addr_lo, h_tx_bd->nbd);
9214 /* now get a new data BD
9215 * (after the pbd) and fill it */
9216 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9217 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9219 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9220 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9222 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9223 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9224 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9226 /* this marks the BD as one that has no individual mapping
9227 * the FW ignores this flag in a BD not marked start
9229 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9230 DP(NETIF_MSG_TX_QUEUED,
9231 "TSO split data size is %d (%x:%x)\n",
9232 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9234 /* update the tx_bd pointer so the caller can mark the last BD */
9240 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9243 csum = (u16) ~csum_fold(csum_sub(csum,
9244 csum_partial(t_header - fix, fix, 0)));
9247 csum = (u16) ~csum_fold(csum_add(csum,
9248 csum_partial(t_header, -fix, 0)));
9250 return swab16(csum);
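/* Mechanically, the fixup above removes the partial sum over the 'fix'
 * bytes just before the transport header (fix > 0), or folds in the
 * first -fix bytes starting at it (fix < 0), then folds the result to
 * 16 bits and byte-swaps it into the representation the parsing BD
 * expects. */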
9253 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9257 if (skb->ip_summed != CHECKSUM_PARTIAL)
9261 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9263 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9264 rc |= XMIT_CSUM_TCP;
9268 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9269 rc |= XMIT_CSUM_TCP;
9273 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9276 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9282 /* check if packet requires linearization (packet is too fragmented) */
9283 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9288 int first_bd_sz = 0;
9290 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9291 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9293 if (xmit_type & XMIT_GSO) {
9294 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9295 /* Check if LSO packet needs to be copied:
9296 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9297 int wnd_size = MAX_FETCH_BD - 3;
9298 /* Number of windows to check */
9299 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9304 /* Headers length */
9305 hlen = (int)(skb_transport_header(skb) - skb->data) +
9308 /* Amount of data (w/o headers) on the linear part of the SKB */
9309 first_bd_sz = skb_headlen(skb) - hlen;
9311 wnd_sum = first_bd_sz;
9313 /* Calculate the first sum - it's special */
9314 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9316 skb_shinfo(skb)->frags[frag_idx].size;
9318 /* If there was data on linear skb data - check it */
9319 if (first_bd_sz > 0) {
9320 if (unlikely(wnd_sum < lso_mss)) {
9325 wnd_sum -= first_bd_sz;
9328 /* Others are easier: run through the frag list and
9329 check all windows */
9330 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9332 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9334 if (unlikely(wnd_sum < lso_mss)) {
9339 skb_shinfo(skb)->frags[wnd_idx].size;
9343 /* in the non-LSO case, a too fragmented packet should always
9350 if (unlikely(to_copy))
9351 DP(NETIF_MSG_TX_QUEUED,
9352 "Linearization IS REQUIRED for %s packet. "
9353 "num_frags %d hlen %d first_bd_sz %d\n",
9354 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9355 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
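/* Worked example for the window check above, with hypothetical
 * numbers: if MAX_FETCH_BD - 3 were 10, an LSO skb with 13 frags and
 * lso_mss 1460 yields num_wnds = 3, and each run of 10 consecutive BDs
 * (the linear part counting as the first) must sum to at least 1460
 * bytes.  Any window smaller than one MSS forces skb_linearize(),
 * because the FW is assumed to fetch at most MAX_FETCH_BD BDs at a
 * time. */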
9360 /* called with netif_tx_lock
9361 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9362 * netif_wake_queue()
9364 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9366 struct bnx2x *bp = netdev_priv(dev);
9367 struct bnx2x_fastpath *fp;
9368 struct sw_tx_bd *tx_buf;
9369 struct eth_tx_bd *tx_bd;
9370 struct eth_tx_parse_bd *pbd = NULL;
9371 u16 pkt_prod, bd_prod;
9374 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9375 int vlan_off = (bp->e1hov ? 4 : 0);
9379 #ifdef BNX2X_STOP_ON_ERROR
9380 if (unlikely(bp->panic))
9381 return NETDEV_TX_BUSY;
9384 fp_index = (smp_processor_id() % bp->num_queues);
9385 fp = &bp->fp[fp_index];
9387 if (unlikely(bnx2x_tx_avail(fp) <
9388 (skb_shinfo(skb)->nr_frags + 3))) {
9389 bp->eth_stats.driver_xoff++;
9390 netif_stop_queue(dev);
9391 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9392 return NETDEV_TX_BUSY;
9395 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9396 " gso type %x xmit_type %x\n",
9397 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9398 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9400 /* First, check if we need to linearize the skb
9401 (due to FW restrictions) */
9402 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9403 /* Statistics of linearization */
9405 if (skb_linearize(skb) != 0) {
9406 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9407 "silently dropping this SKB\n");
9408 dev_kfree_skb_any(skb);
9414 Please read carefully. First we use one BD which we mark as start,
9415 then for TSO or xsum we have a parsing info BD,
9416 and only then we have the rest of the TSO BDs.
9417 (don't forget to mark the last one as last,
9418 and to unmap only AFTER you write to the BD ...)
9419 And above all, all PBD sizes are in words - NOT DWORDS!
9422 pkt_prod = fp->tx_pkt_prod++;
9423 bd_prod = TX_BD(fp->tx_bd_prod);
9425 /* get a tx_buf and first BD */
9426 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9427 tx_bd = &fp->tx_desc_ring[bd_prod];
9429 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9430 tx_bd->general_data = (UNICAST_ADDRESS <<
9431 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9432 tx_bd->general_data |= 1; /* header nbd */
9434 /* remember the first BD of the packet */
9435 tx_buf->first_bd = fp->tx_bd_prod;
9438 DP(NETIF_MSG_TX_QUEUED,
9439 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9440 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9442 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9443 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9444 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9447 tx_bd->vlan = cpu_to_le16(pkt_prod);
9451 /* turn on parsing and get a BD */
9452 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9453 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9455 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9458 if (xmit_type & XMIT_CSUM) {
9459 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9461 /* for now NS flag is not used in Linux */
9462 pbd->global_data = (hlen |
9463 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9464 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9466 pbd->ip_hlen = (skb_transport_header(skb) -
9467 skb_network_header(skb)) / 2;
9469 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9471 pbd->total_hlen = cpu_to_le16(hlen);
9472 hlen = hlen*2 - vlan_off;
9474 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9476 if (xmit_type & XMIT_CSUM_V4)
9477 tx_bd->bd_flags.as_bitfield |=
9478 ETH_TX_BD_FLAGS_IP_CSUM;
9480 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9482 if (xmit_type & XMIT_CSUM_TCP) {
9483 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9486 s8 fix = SKB_CS_OFF(skb); /* signed! */
9488 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9489 pbd->cs_offset = fix / 2;
9491 DP(NETIF_MSG_TX_QUEUED,
9492 "hlen %d offset %d fix %d csum before fix %x\n",
9493 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9496 /* HW bug: fixup the CSUM */
9497 pbd->tcp_pseudo_csum =
9498 bnx2x_csum_fix(skb_transport_header(skb),
9501 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9502 pbd->tcp_pseudo_csum);
9506 mapping = pci_map_single(bp->pdev, skb->data,
9507 skb_headlen(skb), PCI_DMA_TODEVICE);
9509 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9510 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9511 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9512 tx_bd->nbd = cpu_to_le16(nbd);
9513 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9515 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9516 " nbytes %d flags %x vlan %x\n",
9517 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9518 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9519 le16_to_cpu(tx_bd->vlan));
9521 if (xmit_type & XMIT_GSO) {
9523 DP(NETIF_MSG_TX_QUEUED,
9524 "TSO packet len %d hlen %d total len %d tso size %d\n",
9525 skb->len, hlen, skb_headlen(skb),
9526 skb_shinfo(skb)->gso_size);
9528 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9530 if (unlikely(skb_headlen(skb) > hlen))
9531 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9534 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9535 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9536 pbd->tcp_flags = pbd_tcp_flags(skb);
9538 if (xmit_type & XMIT_GSO_V4) {
9539 pbd->ip_id = swab16(ip_hdr(skb)->id);
9540 pbd->tcp_pseudo_csum =
9541 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9543 0, IPPROTO_TCP, 0));
9546 pbd->tcp_pseudo_csum =
9547 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9548 &ipv6_hdr(skb)->daddr,
9549 0, IPPROTO_TCP, 0));
9551 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9554 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9555 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9557 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9558 tx_bd = &fp->tx_desc_ring[bd_prod];
9560 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9561 frag->size, PCI_DMA_TODEVICE);
9563 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9564 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9565 tx_bd->nbytes = cpu_to_le16(frag->size);
9566 tx_bd->vlan = cpu_to_le16(pkt_prod);
9567 tx_bd->bd_flags.as_bitfield = 0;
9569 DP(NETIF_MSG_TX_QUEUED,
9570 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9571 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9572 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9575 /* now at last mark the BD as the last BD */
9576 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9578 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9579 tx_bd, tx_bd->bd_flags.as_bitfield);
9581 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9583 /* now send a tx doorbell, counting the next BD
9584 * if the packet contains or ends with it
9586 if (TX_BD_POFF(bd_prod) < nbd)
9590 DP(NETIF_MSG_TX_QUEUED,
9591 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9592 " tcp_flags %x xsum %x seq %u hlen %u\n",
9593 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9594 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9595 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9597 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9599 fp->hw_tx_prods->bds_prod =
9600 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9601 mb(); /* FW restriction: must not reorder writing nbd and packets */
9602 fp->hw_tx_prods->packets_prod =
9603 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9604 DOORBELL(bp, FP_IDX(fp), 0);
9608 fp->tx_bd_prod += nbd;
9609 dev->trans_start = jiffies;
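/* The check below deliberately re-tests ring space after stopping the
 * queue: bnx2x_tx_int() may free descriptors between the first test
 * and netif_stop_queue(), and since the completion path only wakes a
 * queue it observes as stopped, the re-check-and-wake closes that race
 * window. */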
9611 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9612 netif_stop_queue(dev);
9613 bp->eth_stats.driver_xoff++;
9614 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9615 netif_wake_queue(dev);
9619 return NETDEV_TX_OK;
9622 /* called with rtnl_lock */
9623 static int bnx2x_open(struct net_device *dev)
9625 struct bnx2x *bp = netdev_priv(dev);
9627 bnx2x_set_power_state(bp, PCI_D0);
9629 return bnx2x_nic_load(bp, LOAD_OPEN);
9632 /* called with rtnl_lock */
9633 static int bnx2x_close(struct net_device *dev)
9635 struct bnx2x *bp = netdev_priv(dev);
9637 /* Unload the driver, release IRQs */
9638 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9639 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9640 if (!CHIP_REV_IS_SLOW(bp))
9641 bnx2x_set_power_state(bp, PCI_D3hot);
9646 /* called with netif_tx_lock from set_multicast */
9647 static void bnx2x_set_rx_mode(struct net_device *dev)
9649 struct bnx2x *bp = netdev_priv(dev);
9650 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9651 int port = BP_PORT(bp);
9653 if (bp->state != BNX2X_STATE_OPEN) {
9654 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9658 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9660 if (dev->flags & IFF_PROMISC)
9661 rx_mode = BNX2X_RX_MODE_PROMISC;
9663 else if ((dev->flags & IFF_ALLMULTI) ||
9664 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9665 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9667 else { /* some multicasts */
9668 if (CHIP_IS_E1(bp)) {
9670 struct dev_mc_list *mclist;
9671 struct mac_configuration_cmd *config =
9672 bnx2x_sp(bp, mcast_config);
9674 for (i = 0, mclist = dev->mc_list;
9675 mclist && (i < dev->mc_count);
9676 i++, mclist = mclist->next) {
9678 config->config_table[i].
9679 cam_entry.msb_mac_addr =
9680 swab16(*(u16 *)&mclist->dmi_addr[0]);
9681 config->config_table[i].
9682 cam_entry.middle_mac_addr =
9683 swab16(*(u16 *)&mclist->dmi_addr[2]);
9684 config->config_table[i].
9685 cam_entry.lsb_mac_addr =
9686 swab16(*(u16 *)&mclist->dmi_addr[4]);
9687 config->config_table[i].cam_entry.flags =
9689 config->config_table[i].
9690 target_table_entry.flags = 0;
9691 config->config_table[i].
9692 target_table_entry.client_id = 0;
9693 config->config_table[i].
9694 target_table_entry.vlan_id = 0;
9697 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9698 config->config_table[i].
9699 cam_entry.msb_mac_addr,
9700 config->config_table[i].
9701 cam_entry.middle_mac_addr,
9702 config->config_table[i].
9703 cam_entry.lsb_mac_addr);
9705 old = config->hdr.length_6b;
9707 for (; i < old; i++) {
9708 if (CAM_IS_INVALID(config->
9710 i--; /* already invalidated */
9714 CAM_INVALIDATE(config->
9719 if (CHIP_REV_IS_SLOW(bp))
9720 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9722 offset = BNX2X_MAX_MULTICAST*(1 + port);
9724 config->hdr.length_6b = i;
9725 config->hdr.offset = offset;
9726 config->hdr.client_id = BP_CL_ID(bp);
9727 config->hdr.reserved1 = 0;
9729 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9730 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9731 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9734 /* Accept one or more multicasts */
9735 struct dev_mc_list *mclist;
9736 u32 mc_filter[MC_HASH_SIZE];
9737 u32 crc, bit, regidx;
9740 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9742 for (i = 0, mclist = dev->mc_list;
9743 mclist && (i < dev->mc_count);
9744 i++, mclist = mclist->next) {
9746 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9747 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9748 mclist->dmi_addr[0], mclist->dmi_addr[1],
9749 mclist->dmi_addr[2], mclist->dmi_addr[3],
9750 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9752 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9753 bit = (crc >> 24) & 0xff;
9756 mc_filter[regidx] |= (1 << bit);
9759 for (i = 0; i < MC_HASH_SIZE; i++)
9760 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9765 bp->rx_mode = rx_mode;
9766 bnx2x_set_storm_rx_mode(bp);
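/* For the non-E1 branch above, the 256-bit multicast hash is derived
 * from a CRC32c of the MAC address: the top CRC byte picks one of 256
 * filter bits spread across the MC_HASH_SIZE 32-bit registers.  A
 * sketch of the index derivation (the regidx/bit split is assumed from
 * the 8-bit hash value and the 32-bit register width):
 */
#if 0	/* illustrative only */
	crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
	bit = (crc >> 24) & 0xff;	/* hash value, 0..255 */
	regidx = bit >> 5;		/* which 32-bit register */
	bit &= 0x1f;			/* bit within that register */
	mc_filter[regidx] |= (1 << bit);
#endif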
9769 /* called with rtnl_lock */
9770 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9772 struct sockaddr *addr = p;
9773 struct bnx2x *bp = netdev_priv(dev);
9775 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9778 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9779 if (netif_running(dev)) {
9781 bnx2x_set_mac_addr_e1(bp);
9783 bnx2x_set_mac_addr_e1h(bp);
9789 /* called with rtnl_lock */
9790 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9792 struct mii_ioctl_data *data = if_mii(ifr);
9793 struct bnx2x *bp = netdev_priv(dev);
9798 data->phy_id = bp->port.phy_addr;
9805 if (!netif_running(dev))
9808 mutex_lock(&bp->port.phy_mutex);
9809 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9810 DEFAULT_PHY_DEV_ADDR,
9811 (data->reg_num & 0x1f), &mii_regval);
9812 data->val_out = mii_regval;
9813 mutex_unlock(&bp->port.phy_mutex);
9818 if (!capable(CAP_NET_ADMIN))
9821 if (!netif_running(dev))
9824 mutex_lock(&bp->port.phy_mutex);
9825 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9826 DEFAULT_PHY_DEV_ADDR,
9827 (data->reg_num & 0x1f), data->val_in);
9828 mutex_unlock(&bp->port.phy_mutex);
9839 /* called with rtnl_lock */
9840 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9842 struct bnx2x *bp = netdev_priv(dev);
9845 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9846 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9849 /* This does not race with packet allocation
9850 * because the actual alloc size is
9851 * only updated as part of load
9855 if (netif_running(dev)) {
9856 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9857 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9863 static void bnx2x_tx_timeout(struct net_device *dev)
9865 struct bnx2x *bp = netdev_priv(dev);
9867 #ifdef BNX2X_STOP_ON_ERROR
9871 /* This allows the netif to be shut down gracefully before resetting */
9872 schedule_work(&bp->reset_task);
9876 /* called with rtnl_lock */
9877 static void bnx2x_vlan_rx_register(struct net_device *dev,
9878 struct vlan_group *vlgrp)
9880 struct bnx2x *bp = netdev_priv(dev);
9883 if (netif_running(dev))
9884 bnx2x_set_client_config(bp);
9889 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9890 static void poll_bnx2x(struct net_device *dev)
9892 struct bnx2x *bp = netdev_priv(dev);
9894 disable_irq(bp->pdev->irq);
9895 bnx2x_interrupt(bp->pdev->irq, dev);
9896 enable_irq(bp->pdev->irq);
9900 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9901 struct net_device *dev)
9906 SET_NETDEV_DEV(dev, &pdev->dev);
9907 bp = netdev_priv(dev);
9912 bp->func = PCI_FUNC(pdev->devfn);
9914 rc = pci_enable_device(pdev);
9916 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9920 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9921 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9924 goto err_out_disable;
9927 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9928 printk(KERN_ERR PFX "Cannot find second PCI device"
9929 " base address, aborting\n");
9931 goto err_out_disable;
9934 if (atomic_read(&pdev->enable_cnt) == 1) {
9935 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9937 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9939 goto err_out_disable;
9942 pci_set_master(pdev);
9943 pci_save_state(pdev);
9946 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9947 if (bp->pm_cap == 0) {
9948 printk(KERN_ERR PFX "Cannot find power management"
9949 " capability, aborting\n");
9951 goto err_out_release;
9954 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9955 if (bp->pcie_cap == 0) {
9956 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9959 goto err_out_release;
9962 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9963 bp->flags |= USING_DAC_FLAG;
9964 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9965 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9966 " failed, aborting\n");
9968 goto err_out_release;
9971 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9972 printk(KERN_ERR PFX "System does not support DMA,"
9975 goto err_out_release;
9978 dev->mem_start = pci_resource_start(pdev, 0);
9979 dev->base_addr = dev->mem_start;
9980 dev->mem_end = pci_resource_end(pdev, 0);
9982 dev->irq = pdev->irq;
9984 bp->regview = ioremap_nocache(dev->base_addr,
9985 pci_resource_len(pdev, 0));
9987 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9989 goto err_out_release;
9992 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9993 min_t(u64, BNX2X_DB_SIZE,
9994 pci_resource_len(pdev, 2)));
9995 if (!bp->doorbells) {
9996 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10001 bnx2x_set_power_state(bp, PCI_D0);
10003 /* clean indirect addresses */
10004 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10005 PCICFG_VENDOR_ID_OFFSET);
10006 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10007 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10008 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10009 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10011 dev->hard_start_xmit = bnx2x_start_xmit;
10012 dev->watchdog_timeo = TX_TIMEOUT;
10014 dev->ethtool_ops = &bnx2x_ethtool_ops;
10015 dev->open = bnx2x_open;
10016 dev->stop = bnx2x_close;
10017 dev->set_multicast_list = bnx2x_set_rx_mode;
10018 dev->set_mac_address = bnx2x_change_mac_addr;
10019 dev->do_ioctl = bnx2x_ioctl;
10020 dev->change_mtu = bnx2x_change_mtu;
10021 dev->tx_timeout = bnx2x_tx_timeout;
10023 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10025 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10026 dev->poll_controller = poll_bnx2x;
10028 dev->features |= NETIF_F_SG;
10029 dev->features |= NETIF_F_HW_CSUM;
10030 if (bp->flags & USING_DAC_FLAG)
10031 dev->features |= NETIF_F_HIGHDMA;
10033 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10035 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10036 dev->features |= NETIF_F_TSO6;
10042 iounmap(bp->regview);
10043 bp->regview = NULL;
10045 if (bp->doorbells) {
10046 iounmap(bp->doorbells);
10047 bp->doorbells = NULL;
10051 if (atomic_read(&pdev->enable_cnt) == 1)
10052 pci_release_regions(pdev);
10055 pci_disable_device(pdev);
10056 pci_set_drvdata(pdev, NULL);
10062 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10064 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10066 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10070 /* return value of 1=2.5GHz 2=5GHz */
10071 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10073 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10075 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10079 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10080 const struct pci_device_id *ent)
10082 static int version_printed;
10083 struct net_device *dev = NULL;
10086 DECLARE_MAC_BUF(mac);
10088 if (version_printed++ == 0)
10089 printk(KERN_INFO "%s", version);
10091 /* dev zeroed in alloc_etherdev */
10092 dev = alloc_etherdev(sizeof(*bp));
10094 printk(KERN_ERR PFX "Cannot allocate net device\n");
10098 netif_carrier_off(dev);
10100 bp = netdev_priv(dev);
10101 bp->msglevel = debug;
10103 rc = bnx2x_init_dev(pdev, dev);
10109 rc = register_netdev(dev);
10111 dev_err(&pdev->dev, "Cannot register net device\n");
10112 goto init_one_exit;
10115 pci_set_drvdata(pdev, dev);
10117 rc = bnx2x_init_bp(bp);
10119 unregister_netdev(dev);
10120 goto init_one_exit;
10123 bp->common.name = board_info[ent->driver_data].name;
10124 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10125 " IRQ %d, ", dev->name, bp->common.name,
10126 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10127 bnx2x_get_pcie_width(bp),
10128 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10129 dev->base_addr, bp->pdev->irq);
10130 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10135 iounmap(bp->regview);
10138 iounmap(bp->doorbells);
10142 if (atomic_read(&pdev->enable_cnt) == 1)
10143 pci_release_regions(pdev);
10145 pci_disable_device(pdev);
10146 pci_set_drvdata(pdev, NULL);
10151 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10153 struct net_device *dev = pci_get_drvdata(pdev);
10157 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10160 bp = netdev_priv(dev);
10162 unregister_netdev(dev);
10165 iounmap(bp->regview);
10168 iounmap(bp->doorbells);
10172 if (atomic_read(&pdev->enable_cnt) == 1)
10173 pci_release_regions(pdev);
10175 pci_disable_device(pdev);
10176 pci_set_drvdata(pdev, NULL);
10179 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10181 struct net_device *dev = pci_get_drvdata(pdev);
10185 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10188 bp = netdev_priv(dev);
10192 pci_save_state(pdev);
10194 if (!netif_running(dev)) {
10199 netif_device_detach(dev);
10201 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10203 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10210 static int bnx2x_resume(struct pci_dev *pdev)
10212 struct net_device *dev = pci_get_drvdata(pdev);
10217 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10220 bp = netdev_priv(dev);
10224 pci_restore_state(pdev);
10226 if (!netif_running(dev)) {
10231 bnx2x_set_power_state(bp, PCI_D0);
10232 netif_device_attach(dev);
10234 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10242 * bnx2x_io_error_detected - called when PCI error is detected
10243 * @pdev: Pointer to PCI device
10244 * @state: The current pci connection state
10246 * This function is called after a PCI bus error affecting
10247 * this device has been detected.
10249 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10250 pci_channel_state_t state)
10252 struct net_device *dev = pci_get_drvdata(pdev);
10253 struct bnx2x *bp = netdev_priv(dev);
10257 netif_device_detach(dev);
10259 if (netif_running(dev))
10260 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10262 pci_disable_device(pdev);
10266 /* Request a slot reset */
10267 return PCI_ERS_RESULT_NEED_RESET;
10271 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10272 * @pdev: Pointer to PCI device
10274 * Restart the card from scratch, as if from a cold-boot.
10276 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10278 struct net_device *dev = pci_get_drvdata(pdev);
10279 struct bnx2x *bp = netdev_priv(dev);
10283 if (pci_enable_device(pdev)) {
10284 dev_err(&pdev->dev,
10285 "Cannot re-enable PCI device after reset\n");
10287 return PCI_ERS_RESULT_DISCONNECT;
10290 pci_set_master(pdev);
10291 pci_restore_state(pdev);
10293 if (netif_running(dev))
10294 bnx2x_set_power_state(bp, PCI_D0);
10298 return PCI_ERS_RESULT_RECOVERED;
10302 * bnx2x_io_resume - called when traffic can start flowing again
10303 * @pdev: Pointer to PCI device
10305 * This callback is called when the error recovery driver tells us that
10306 * it's OK to resume normal operation.
10308 static void bnx2x_io_resume(struct pci_dev *pdev)
10310 struct net_device *dev = pci_get_drvdata(pdev);
10311 struct bnx2x *bp = netdev_priv(dev);
10315 if (netif_running(dev))
10316 bnx2x_nic_load(bp, LOAD_OPEN);
10318 netif_device_attach(dev);
10323 static struct pci_error_handlers bnx2x_err_handler = {
10324 .error_detected = bnx2x_io_error_detected,
10325 .slot_reset = bnx2x_io_slot_reset,
10326 .resume = bnx2x_io_resume,
10329 static struct pci_driver bnx2x_pci_driver = {
10330 .name = DRV_MODULE_NAME,
10331 .id_table = bnx2x_pci_tbl,
10332 .probe = bnx2x_init_one,
10333 .remove = __devexit_p(bnx2x_remove_one),
10334 .suspend = bnx2x_suspend,
10335 .resume = bnx2x_resume,
10336 .err_handler = &bnx2x_err_handler,
10339 static int __init bnx2x_init(void)
10341 return pci_register_driver(&bnx2x_pci_driver);
10344 static void __exit bnx2x_cleanup(void)
10346 pci_unregister_driver(&bnx2x_pci_driver);
10349 module_init(bnx2x_init);
10350 module_exit(bnx2x_cleanup);