/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;
static int use_inta;
static int poll;
static int debug;

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
static int use_multi;
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
/* used only at init;
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
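
/* The DMAE engine exposes 16 command channels; each channel has a "GO"
 * register, and writing 1 to dmae_reg_go_c[idx] kicks off the command
 * previously loaded into that channel's slot in DMAE command memory
 * (see bnx2x_post_dmae() below). */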
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
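
/* DMA a block of host memory into device (GRC) space: build a
 * dmae_command describing the transfer, post it on the init channel and
 * busy-wait on the write-back completion word until the engine stores
 * DMAE_COMP_VAL there. */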
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
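
/* Some device registers are 64 bits wide and must be accessed as a
 * coherent pair of dwords; these wrappers do that through DMAE on
 * behalf of slowpath callers. */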
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
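
/* Dump the assert lists of the four STORM processors (X, T, C and U).
 * Each STORM keeps a last-index byte plus an array of 4-dword assert
 * entries; entries are printed until an invalid assert opcode is hit. */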
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
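
/* Print the firmware (MCP) trace buffer from its scratchpad memory.  The
 * word at offset 0xf104 holds a cyclic "mark": the buffer is dumped from
 * the mark to the end of the trace area and then from the start of the
 * area back up to the mark. */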
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
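
/* Dump the driver state around a fatal error: per-queue producer and
 * consumer indices, the TX/RX BDs and CQEs surrounding the current
 * consumers, and the default status block indices. */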
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
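
/* Enable interrupt delivery through the HC.  In MSI-X mode only the
 * MSI/MSI-X and attention-bit enables are set; in INTA mode the
 * single-ISR and INT-line enables are set as well, and the register is
 * first written with the MSI/MSI-X enable set and then rewritten with
 * it cleared. */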
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/*
 * General service functions
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
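
/* Reading the IGU SIMD mask register returns the pending status block
 * bits for this function; the read itself also serves as the interrupt
 * acknowledge. */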
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif

	return result;
}
/*
 * fast path service functions
 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
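
/* Count free TX BDs as seen by the driver.  The "next page" BDs are
 * treated as used (NUM_TX_RINGS is added to the used count), so the
 * value returned here is deliberately pessimistic. */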
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
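
/* TX completion: walk from the last software consumer up to the hardware
 * consumer taken from the status block, free every completed packet and
 * wake the netdev queue if it was stopped for lack of BDs. */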
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
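
/* The SGE ring is tracked with a bit mask, one bit per SGE element
 * grouped into 64-bit words.  Bits are cleared as the firmware consumes
 * elements (their indices arrive in the TPA-stop CQE), and once every
 * bit in a word has been cleared the word is set back to all 1-s and
 * the SGE producer may advance past it. */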
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
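
/* TPA (LRO) start: the firmware opened an aggregation on this queue.
 * The empty skb parked in the TPA pool takes the ring slot at prod,
 * while the cons buffer (holding the first aggregated segment) moves
 * into the pool, still mapped, until the TPA stop CQE arrives. */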
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
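
/* TPA stop: the aggregation is complete.  A replacement skb for the pool
 * is allocated first; only if that succeeds is the aggregated skb (its
 * linear part plus the SGE pages attached as page frags) fixed up and
 * handed to the stack, otherwise the whole aggregation is dropped. */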
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
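
/* The RX completion loop: walk the CQ from the software consumer to the
 * hardware consumer found in the status block.  Slowpath CQEs go to
 * bnx2x_sp_event(), TPA start/stop CQEs to the aggregation code above,
 * and plain packets are either copied (small frames on a large-MTU ring)
 * or passed up zero-copy with a freshly allocated replacement buffer. */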
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad, queue;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
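
/* Translate the auto-negotiated IEEE pause resolution into the ethtool
 * ADVERTISED_Pause/ADVERTISED_Asym_Pause bits reported to user space. */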
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
	 In the latter case fairness algorithm should be deactivated.
	 If not all min_rates are zero then those that are zeroes will
	 be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
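
/* Program the per-port congestion management (CMNG) context: rate
 * shaping is always enabled in E1H multi-function mode, fairness only
 * when at least one VN has a non-zero minimum rate (en_fness, i.e. a
 * non-zero wsum). */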
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
				((double)m_rs_vn.
				 protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn shares the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol shares the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
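
/* Worked example with made-up numbers: on a 10000 Mbps link with two
 * visible VNs whose configured MIN bandwidth fields yield 2500 and 7500
 * after the *100 scaling above, wsum = 10000 and each VN's
 * vn_credit_delta is proportional to
 * vn_min_rate * (T_FAIR_COEF / (8 * wsum)), i.e. to its configured share
 * of the port rate. */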
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int vn;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
						     &m_cmng_port);
	}
}
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2283 if (bp->state != BNX2X_STATE_OPEN)
2286 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2288 if (bp->link_vars.link_up)
2289 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2299 int port = BP_PORT(bp);
2303 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305 /* enable nig attention */
2306 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2310 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2318 * General service functions
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323 u32 data_hi, u32 data_lo, int common)
2325 int func = BP_FUNC(bp);
2327 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2329 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2333 #ifdef BNX2X_STOP_ON_ERROR
2334 if (unlikely(bp->panic))
2338 spin_lock_bh(&bp->spq_lock);
2340 if (!bp->spq_left) {
2341 BNX2X_ERR("BUG! SPQ ring full!\n");
2342 spin_unlock_bh(&bp->spq_lock);
2347 /* CID needs port number to be encoded int it */
2348 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2351 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2353 bp->spq_prod_bd->hdr.type |=
2354 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2356 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2361 if (bp->spq_prod_bd == bp->spq_last_bd) {
2362 bp->spq_prod_bd = bp->spq;
2363 bp->spq_prod_idx = 0;
2364 DP(NETIF_MSG_TIMER, "end of spq\n");
2371 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2374 spin_unlock_bh(&bp->spq_lock);
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2386 for (j = 0; j < i*10; j++) {
2388 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390 if (val & (1L << 31))
2395 if (!(val & (1L << 31))) {
2396 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2413 struct host_def_status_block *def_sb = bp->def_status_blk;
2416 barrier(); /* status block is written to by the chip */
2417 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2421 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2425 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2429 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2433 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2441 * slow path service functions
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2446 int port = BP_PORT(bp);
2447 int func = BP_FUNC(bp);
2448 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2449 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452 NIG_REG_MASK_INTERRUPT_PORT0;
2455 if (bp->attn_state & asserted)
2456 BNX2X_ERR("IGU ERROR\n");
2458 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459 aeu_mask = REG_RD(bp, aeu_addr);
2461 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2462 aeu_mask, asserted);
2463 aeu_mask &= ~(asserted & 0xff);
2464 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2466 REG_WR(bp, aeu_addr, aeu_mask);
2467 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2470 bp->attn_state |= asserted;
2471 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2473 if (asserted & ATTN_HARD_WIRED_MASK) {
2474 if (asserted & ATTN_NIG_FOR_FUNC) {
2476 /* save nig interrupt mask */
2477 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2478 REG_WR(bp, nig_int_mask_addr, 0);
2480 bnx2x_link_attn(bp);
2482 /* handle unicore attn? */
2484 if (asserted & ATTN_SW_TIMER_4_FUNC)
2485 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2487 if (asserted & GPIO_2_FUNC)
2488 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2490 if (asserted & GPIO_3_FUNC)
2491 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2493 if (asserted & GPIO_4_FUNC)
2494 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2497 if (asserted & ATTN_GENERAL_ATTN_1) {
2498 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2499 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2501 if (asserted & ATTN_GENERAL_ATTN_2) {
2502 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2503 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2505 if (asserted & ATTN_GENERAL_ATTN_3) {
2506 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2510 if (asserted & ATTN_GENERAL_ATTN_4) {
2511 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2512 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2514 if (asserted & ATTN_GENERAL_ATTN_5) {
2515 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2516 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2518 if (asserted & ATTN_GENERAL_ATTN_6) {
2519 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2520 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2524 } /* if hardwired */
2526 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2527 asserted, BAR_IGU_INTMEM + igu_addr);
2528 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2530 /* now set back the mask */
2531 if (asserted & ATTN_NIG_FOR_FUNC)
2532 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2535 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2537 int port = BP_PORT(bp);
2541 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2544 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2546 val = REG_RD(bp, reg_offset);
2547 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548 REG_WR(bp, reg_offset, val);
2550 BNX2X_ERR("SPIO5 hw attention\n");
2552 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2554 /* Fan failure attention */
2556 /* The PHY reset is controled by GPIO 1 */
2557 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2558 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2559 /* Low power mode is controled by GPIO 2 */
2560 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2561 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2562 /* mark the failure */
2563 bp->link_params.ext_phy_config &=
2564 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2565 bp->link_params.ext_phy_config |=
2566 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2568 dev_info.port_hw_config[port].
2569 external_phy_config,
2570 bp->link_params.ext_phy_config);
2571 /* log the failure */
2572 printk(KERN_ERR PFX "Fan Failure on Network"
2573 " Controller %s has caused the driver to"
2574 " shutdown the card to prevent permanent"
2575 " damage. Please contact Dell Support for"
2576 " assistance\n", bp->dev->name);
2584 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2586 val = REG_RD(bp, reg_offset);
2587 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2588 REG_WR(bp, reg_offset, val);
2590 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2591 (attn & HW_INTERRUT_ASSERT_SET_0));
2596 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2600 if (attn & BNX2X_DOORQ_ASSERT) {
2602 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2603 BNX2X_ERR("DB hw attention 0x%x\n", val);
2604 /* DORQ discard attention */
2606 BNX2X_ERR("FATAL error from DORQ\n");
2609 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2611 int port = BP_PORT(bp);
2614 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2615 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2617 val = REG_RD(bp, reg_offset);
2618 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2619 REG_WR(bp, reg_offset, val);
2621 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2622 (attn & HW_INTERRUT_ASSERT_SET_1));
2627 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2631 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2633 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2634 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2635 /* CFC error attention */
2637 BNX2X_ERR("FATAL error from CFC\n");
2640 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2642 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2643 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2644 /* RQ_USDMDP_FIFO_OVERFLOW */
2646 BNX2X_ERR("FATAL error from PXP\n");
2649 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2651 int port = BP_PORT(bp);
2654 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2655 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2657 val = REG_RD(bp, reg_offset);
2658 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2659 REG_WR(bp, reg_offset, val);
2661 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2662 (attn & HW_INTERRUT_ASSERT_SET_2));
2667 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2671 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2673 if (attn & BNX2X_PMF_LINK_ASSERT) {
2674 int func = BP_FUNC(bp);
2676 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2677 bnx2x__link_status_update(bp);
2678 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2680 bnx2x_pmf_update(bp);
2682 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2684 BNX2X_ERR("MC assert!\n");
2685 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2686 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2688 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2691 } else if (attn & BNX2X_MCP_ASSERT) {
2693 BNX2X_ERR("MCP assert!\n");
2694 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2698 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2701 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2702 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2703 if (attn & BNX2X_GRC_TIMEOUT) {
2704 val = CHIP_IS_E1H(bp) ?
2705 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2706 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2708 if (attn & BNX2X_GRC_RSV) {
2709 val = CHIP_IS_E1H(bp) ?
2710 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2711 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2713 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2717 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2719 struct attn_route attn;
2720 struct attn_route group_mask;
2721 int port = BP_PORT(bp);
2727 /* need to take HW lock because MCP or other port might also
2728 try to handle this event */
2729 bnx2x_acquire_alr(bp);
2731 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2732 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2733 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2734 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2735 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2736 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2738 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2739 if (deasserted & (1 << index)) {
2740 group_mask = bp->attn_group[index];
2742 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2743 index, group_mask.sig[0], group_mask.sig[1],
2744 group_mask.sig[2], group_mask.sig[3]);
2746 bnx2x_attn_int_deasserted3(bp,
2747 attn.sig[3] & group_mask.sig[3]);
2748 bnx2x_attn_int_deasserted1(bp,
2749 attn.sig[1] & group_mask.sig[1]);
2750 bnx2x_attn_int_deasserted2(bp,
2751 attn.sig[2] & group_mask.sig[2]);
2752 bnx2x_attn_int_deasserted0(bp,
2753 attn.sig[0] & group_mask.sig[0]);
2755 if ((attn.sig[0] & group_mask.sig[0] &
2756 HW_PRTY_ASSERT_SET_0) ||
2757 (attn.sig[1] & group_mask.sig[1] &
2758 HW_PRTY_ASSERT_SET_1) ||
2759 (attn.sig[2] & group_mask.sig[2] &
2760 HW_PRTY_ASSERT_SET_2))
2761 BNX2X_ERR("FATAL HW block parity attention\n");
2765 bnx2x_release_alr(bp);
2767 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2770 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2772 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2774 if (~bp->attn_state & deasserted)
2775 BNX2X_ERR("IGU ERROR\n");
2777 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2778 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2780 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2781 aeu_mask = REG_RD(bp, reg_addr);
2783 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2784 aeu_mask, deasserted);
2785 aeu_mask |= (deasserted & 0xff);
2786 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2788 REG_WR(bp, reg_addr, aeu_mask);
2789 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2791 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2792 bp->attn_state &= ~deasserted;
2793 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2796 static void bnx2x_attn_int(struct bnx2x *bp)
2798 /* read local copy of bits */
2799 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2800 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2801 u32 attn_state = bp->attn_state;
2803 /* look for changed bits */
2804 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2805 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2808 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2809 attn_bits, attn_ack, asserted, deasserted);
2811 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2812 BNX2X_ERR("BAD attention state\n");
2814 /* handle bits that were raised */
2816 bnx2x_attn_int_asserted(bp, asserted);
2819 bnx2x_attn_int_deasserted(bp, deasserted);
2822 static void bnx2x_sp_task(struct work_struct *work)
2824 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2828 /* Return here if interrupt is disabled */
2829 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2830 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2834 status = bnx2x_update_dsb_idx(bp);
2835 /* if (status == 0) */
2836 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2838 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2844 /* CStorm events: query_stats, port delete ramrod */
2846 bp->stats_pending = 0;
2848 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2850 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2852 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2854 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2856 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2861 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2863 struct net_device *dev = dev_instance;
2864 struct bnx2x *bp = netdev_priv(dev);
2866 /* Return here if interrupt is disabled */
2867 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2872 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2874 #ifdef BNX2X_STOP_ON_ERROR
2875 if (unlikely(bp->panic))
2879 schedule_work(&bp->sp_task);
2884 /* end of slow path */
2888 /****************************************************************************
2890 ****************************************************************************/
2892 /* sum[hi:lo] += add[hi:lo] */
2893 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2896 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2899 /* difference = minuend - subtrahend */
2900 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2902 if (m_lo < s_lo) { \
2904 d_hi = m_hi - s_hi; \
2906 /* we can 'loan' 1 */ \
2908 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2910 /* m_hi <= s_hi */ \
2915 /* m_lo >= s_lo */ \
2916 if (m_hi < s_hi) { \
2920 /* m_hi >= s_hi */ \
2921 d_hi = m_hi - s_hi; \
2922 d_lo = m_lo - s_lo; \
2927 #define UPDATE_STAT64(s, t) \
2929 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2930 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2931 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2932 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2933 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2934 pstats->mac_stx[1].t##_lo, diff.lo); \
2937 #define UPDATE_STAT64_NIG(s, t) \
2939 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2940 diff.lo, new->s##_lo, old->s##_lo); \
2941 ADD_64(estats->t##_hi, diff.hi, \
2942 estats->t##_lo, diff.lo); \
2945 /* sum[hi:lo] += add */
2946 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2949 s_hi += (s_lo < a) ? 1 : 0; \
2952 #define UPDATE_EXTEND_STAT(s) \
2954 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2955 pstats->mac_stx[1].s##_lo, \
2959 #define UPDATE_EXTEND_TSTAT(s, t) \
2961 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2962 old_tclient->s = le32_to_cpu(tclient->s); \
2963 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2966 #define UPDATE_EXTEND_XSTAT(s, t) \
2968 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2969 old_xclient->s = le32_to_cpu(xclient->s); \
2970 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2974 * General service functions
2977 static inline long bnx2x_hilo(u32 *hiref)
2979 u32 lo = *(hiref + 1);
2980 #if (BITS_PER_LONG == 64)
2983 return HILO_U64(hi, lo);
2990 * Init service functions
2993 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2995 if (!bp->stats_pending) {
2996 struct eth_query_ramrod_data ramrod_data = {0};
2999 ramrod_data.drv_counter = bp->stats_counter++;
3000 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3001 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3003 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3004 ((u32 *)&ramrod_data)[1],
3005 ((u32 *)&ramrod_data)[0], 0);
3007 /* stats ramrod has it's own slot on the spq */
3009 bp->stats_pending = 1;
3014 static void bnx2x_stats_init(struct bnx2x *bp)
3016 int port = BP_PORT(bp);
3018 bp->executer_idx = 0;
3019 bp->stats_counter = 0;
3023 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3025 bp->port.port_stx = 0;
3026 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3028 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3029 bp->port.old_nig_stats.brb_discard =
3030 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3031 bp->port.old_nig_stats.brb_truncate =
3032 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3033 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3034 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3035 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3036 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3038 /* function stats */
3039 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3040 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3041 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3042 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3044 bp->stats_state = STATS_STATE_DISABLED;
3045 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3046 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3049 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3051 struct dmae_command *dmae = &bp->stats_dmae;
3052 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3054 *stats_comp = DMAE_COMP_VAL;
3057 if (bp->executer_idx) {
3058 int loader_idx = PMF_DMAE_C(bp);
3060 memset(dmae, 0, sizeof(struct dmae_command));
3062 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3063 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3064 DMAE_CMD_DST_RESET |
3066 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3068 DMAE_CMD_ENDIANITY_DW_SWAP |
3070 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3072 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3073 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3074 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3075 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3076 sizeof(struct dmae_command) *
3077 (loader_idx + 1)) >> 2;
3078 dmae->dst_addr_hi = 0;
3079 dmae->len = sizeof(struct dmae_command) >> 2;
3082 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3083 dmae->comp_addr_hi = 0;
3087 bnx2x_post_dmae(bp, dmae, loader_idx);
3089 } else if (bp->func_stx) {
3091 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3095 static int bnx2x_stats_comp(struct bnx2x *bp)
3097 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3101 while (*stats_comp != DMAE_COMP_VAL) {
3104 BNX2X_ERR("timeout waiting for stats finished\n");
3113 * Statistics service functions
3116 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3118 struct dmae_command *dmae;
3120 int loader_idx = PMF_DMAE_C(bp);
3121 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3124 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3125 BNX2X_ERR("BUG!\n");
3129 bp->executer_idx = 0;
3131 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3133 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3135 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3137 DMAE_CMD_ENDIANITY_DW_SWAP |
3139 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3140 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3142 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3143 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3144 dmae->src_addr_lo = bp->port.port_stx >> 2;
3145 dmae->src_addr_hi = 0;
3146 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3147 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3148 dmae->len = DMAE_LEN32_RD_MAX;
3149 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3150 dmae->comp_addr_hi = 0;
3153 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3154 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3155 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3156 dmae->src_addr_hi = 0;
3157 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3158 DMAE_LEN32_RD_MAX * 4);
3159 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3160 DMAE_LEN32_RD_MAX * 4);
3161 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3162 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3163 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3164 dmae->comp_val = DMAE_COMP_VAL;
3167 bnx2x_hw_stats_post(bp);
3168 bnx2x_stats_comp(bp);
3171 static void bnx2x_port_stats_init(struct bnx2x *bp)
3173 struct dmae_command *dmae;
3174 int port = BP_PORT(bp);
3175 int vn = BP_E1HVN(bp);
3177 int loader_idx = PMF_DMAE_C(bp);
3179 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3182 if (!bp->link_vars.link_up || !bp->port.pmf) {
3183 BNX2X_ERR("BUG!\n");
3187 bp->executer_idx = 0;
3190 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3191 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3192 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3194 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3196 DMAE_CMD_ENDIANITY_DW_SWAP |
3198 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3199 (vn << DMAE_CMD_E1HVN_SHIFT));
3201 if (bp->port.port_stx) {
3203 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3204 dmae->opcode = opcode;
3205 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3206 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3207 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3208 dmae->dst_addr_hi = 0;
3209 dmae->len = sizeof(struct host_port_stats) >> 2;
3210 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3211 dmae->comp_addr_hi = 0;
3217 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3218 dmae->opcode = opcode;
3219 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3220 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3221 dmae->dst_addr_lo = bp->func_stx >> 2;
3222 dmae->dst_addr_hi = 0;
3223 dmae->len = sizeof(struct host_func_stats) >> 2;
3224 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3225 dmae->comp_addr_hi = 0;
3230 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3231 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3232 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236 DMAE_CMD_ENDIANITY_DW_SWAP |
3238 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3239 (vn << DMAE_CMD_E1HVN_SHIFT));
3241 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3243 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3244 NIG_REG_INGRESS_BMAC0_MEM);
3246 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3247 BIGMAC_REGISTER_TX_STAT_GTBYT */
3248 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249 dmae->opcode = opcode;
3250 dmae->src_addr_lo = (mac_addr +
3251 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3252 dmae->src_addr_hi = 0;
3253 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3254 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3255 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3256 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3257 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3258 dmae->comp_addr_hi = 0;
3261 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3262 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3263 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264 dmae->opcode = opcode;
3265 dmae->src_addr_lo = (mac_addr +
3266 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267 dmae->src_addr_hi = 0;
3268 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3269 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3270 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3271 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3272 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3273 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3274 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3275 dmae->comp_addr_hi = 0;
3278 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3280 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3282 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3283 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3284 dmae->opcode = opcode;
3285 dmae->src_addr_lo = (mac_addr +
3286 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3287 dmae->src_addr_hi = 0;
3288 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3289 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3290 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3291 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292 dmae->comp_addr_hi = 0;
3295 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3296 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297 dmae->opcode = opcode;
3298 dmae->src_addr_lo = (mac_addr +
3299 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3300 dmae->src_addr_hi = 0;
3301 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3302 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3303 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3304 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3306 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307 dmae->comp_addr_hi = 0;
3310 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3311 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3312 dmae->opcode = opcode;
3313 dmae->src_addr_lo = (mac_addr +
3314 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3315 dmae->src_addr_hi = 0;
3316 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3317 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3318 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3319 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3320 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3321 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322 dmae->comp_addr_hi = 0;
3327 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3328 dmae->opcode = opcode;
3329 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3330 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3331 dmae->src_addr_hi = 0;
3332 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3333 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3334 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3335 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336 dmae->comp_addr_hi = 0;
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3342 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3345 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3346 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3347 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3348 dmae->len = (2*sizeof(u32)) >> 2;
3349 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3350 dmae->comp_addr_hi = 0;
3353 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3355 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3356 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3358 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3360 DMAE_CMD_ENDIANITY_DW_SWAP |
3362 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3363 (vn << DMAE_CMD_E1HVN_SHIFT));
3364 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3365 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3366 dmae->src_addr_hi = 0;
3367 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3368 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3369 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3370 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3371 dmae->len = (2*sizeof(u32)) >> 2;
3372 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3373 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3374 dmae->comp_val = DMAE_COMP_VAL;
3379 static void bnx2x_func_stats_init(struct bnx2x *bp)
3381 struct dmae_command *dmae = &bp->stats_dmae;
3382 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3385 if (!bp->func_stx) {
3386 BNX2X_ERR("BUG!\n");
3390 bp->executer_idx = 0;
3391 memset(dmae, 0, sizeof(struct dmae_command));
3393 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3394 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3395 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3397 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3399 DMAE_CMD_ENDIANITY_DW_SWAP |
3401 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3402 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3403 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3404 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3405 dmae->dst_addr_lo = bp->func_stx >> 2;
3406 dmae->dst_addr_hi = 0;
3407 dmae->len = sizeof(struct host_func_stats) >> 2;
3408 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3409 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3410 dmae->comp_val = DMAE_COMP_VAL;
3415 static void bnx2x_stats_start(struct bnx2x *bp)
3418 bnx2x_port_stats_init(bp);
3420 else if (bp->func_stx)
3421 bnx2x_func_stats_init(bp);
3423 bnx2x_hw_stats_post(bp);
3424 bnx2x_storm_stats_post(bp);
3427 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3429 bnx2x_stats_comp(bp);
3430 bnx2x_stats_pmf_update(bp);
3431 bnx2x_stats_start(bp);
3434 static void bnx2x_stats_restart(struct bnx2x *bp)
3436 bnx2x_stats_comp(bp);
3437 bnx2x_stats_start(bp);
3440 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3442 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3443 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3444 struct regpair diff;
3446 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3447 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3448 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3449 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3450 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3451 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3452 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3453 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3455 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3457 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3458 UPDATE_STAT64(tx_stat_gt127,
3459 tx_stat_etherstatspkts65octetsto127octets);
3460 UPDATE_STAT64(tx_stat_gt255,
3461 tx_stat_etherstatspkts128octetsto255octets);
3462 UPDATE_STAT64(tx_stat_gt511,
3463 tx_stat_etherstatspkts256octetsto511octets);
3464 UPDATE_STAT64(tx_stat_gt1023,
3465 tx_stat_etherstatspkts512octetsto1023octets);
3466 UPDATE_STAT64(tx_stat_gt1518,
3467 tx_stat_etherstatspkts1024octetsto1522octets);
3468 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3469 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3470 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3471 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3472 UPDATE_STAT64(tx_stat_gterr,
3473 tx_stat_dot3statsinternalmactransmiterrors);
3474 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3477 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3479 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3480 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3483 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3484 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3485 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3486 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3487 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3488 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3489 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3490 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3492 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3493 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3494 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3495 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3496 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3497 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3498 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3499 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3500 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3501 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3505 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3515 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3517 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3518 struct nig_stats *old = &(bp->port.old_nig_stats);
3519 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521 struct regpair diff;
3523 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3524 bnx2x_bmac_stats_update(bp);
3526 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3527 bnx2x_emac_stats_update(bp);
3529 else { /* unreached */
3530 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3534 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3535 new->brb_discard - old->brb_discard);
3536 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3537 new->brb_truncate - old->brb_truncate);
3539 UPDATE_STAT64_NIG(egress_mac_pkt0,
3540 etherstatspkts1024octetsto1522octets);
3541 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3543 memcpy(old, new, sizeof(struct nig_stats));
3545 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3546 sizeof(struct mac_stx));
3547 estats->brb_drop_hi = pstats->brb_drop_hi;
3548 estats->brb_drop_lo = pstats->brb_drop_lo;
3550 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3555 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3557 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3558 int cl_id = BP_CL_ID(bp);
3559 struct tstorm_per_port_stats *tport =
3560 &stats->tstorm_common.port_statistics;
3561 struct tstorm_per_client_stats *tclient =
3562 &stats->tstorm_common.client_statistics[cl_id];
3563 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3564 struct xstorm_per_client_stats *xclient =
3565 &stats->xstorm_common.client_statistics[cl_id];
3566 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3567 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3568 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3571 /* are storm stats valid? */
3572 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3573 bp->stats_counter) {
3574 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3575 " tstorm counter (%d) != stats_counter (%d)\n",
3576 tclient->stats_counter, bp->stats_counter);
3579 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3580 bp->stats_counter) {
3581 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3582 " xstorm counter (%d) != stats_counter (%d)\n",
3583 xclient->stats_counter, bp->stats_counter);
3587 fstats->total_bytes_received_hi =
3588 fstats->valid_bytes_received_hi =
3589 le32_to_cpu(tclient->total_rcv_bytes.hi);
3590 fstats->total_bytes_received_lo =
3591 fstats->valid_bytes_received_lo =
3592 le32_to_cpu(tclient->total_rcv_bytes.lo);
3594 estats->error_bytes_received_hi =
3595 le32_to_cpu(tclient->rcv_error_bytes.hi);
3596 estats->error_bytes_received_lo =
3597 le32_to_cpu(tclient->rcv_error_bytes.lo);
3598 ADD_64(estats->error_bytes_received_hi,
3599 estats->rx_stat_ifhcinbadoctets_hi,
3600 estats->error_bytes_received_lo,
3601 estats->rx_stat_ifhcinbadoctets_lo);
3603 ADD_64(fstats->total_bytes_received_hi,
3604 estats->error_bytes_received_hi,
3605 fstats->total_bytes_received_lo,
3606 estats->error_bytes_received_lo);
3608 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3609 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3610 total_multicast_packets_received);
3611 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3612 total_broadcast_packets_received);
3614 fstats->total_bytes_transmitted_hi =
3615 le32_to_cpu(xclient->total_sent_bytes.hi);
3616 fstats->total_bytes_transmitted_lo =
3617 le32_to_cpu(xclient->total_sent_bytes.lo);
3619 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3620 total_unicast_packets_transmitted);
3621 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3622 total_multicast_packets_transmitted);
3623 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3624 total_broadcast_packets_transmitted);
3626 memcpy(estats, &(fstats->total_bytes_received_hi),
3627 sizeof(struct host_func_stats) - 2*sizeof(u32));
3629 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3630 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3631 estats->brb_truncate_discard =
3632 le32_to_cpu(tport->brb_truncate_discard);
3633 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3635 old_tclient->rcv_unicast_bytes.hi =
3636 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3637 old_tclient->rcv_unicast_bytes.lo =
3638 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3639 old_tclient->rcv_broadcast_bytes.hi =
3640 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3641 old_tclient->rcv_broadcast_bytes.lo =
3642 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3643 old_tclient->rcv_multicast_bytes.hi =
3644 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3645 old_tclient->rcv_multicast_bytes.lo =
3646 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3647 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3649 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3650 old_tclient->packets_too_big_discard =
3651 le32_to_cpu(tclient->packets_too_big_discard);
3652 estats->no_buff_discard =
3653 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3654 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3656 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3657 old_xclient->unicast_bytes_sent.hi =
3658 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3659 old_xclient->unicast_bytes_sent.lo =
3660 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3661 old_xclient->multicast_bytes_sent.hi =
3662 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3663 old_xclient->multicast_bytes_sent.lo =
3664 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3665 old_xclient->broadcast_bytes_sent.hi =
3666 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3667 old_xclient->broadcast_bytes_sent.lo =
3668 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3670 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3675 static void bnx2x_net_stats_update(struct bnx2x *bp)
3677 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3678 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3679 struct net_device_stats *nstats = &bp->dev->stats;
3681 nstats->rx_packets =
3682 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3683 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3684 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3686 nstats->tx_packets =
3687 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3688 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3689 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3691 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3693 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3695 nstats->rx_dropped = old_tclient->checksum_discard +
3696 estats->mac_discard;
3697 nstats->tx_dropped = 0;
3700 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3702 nstats->collisions =
3703 estats->tx_stat_dot3statssinglecollisionframes_lo +
3704 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3705 estats->tx_stat_dot3statslatecollisions_lo +
3706 estats->tx_stat_dot3statsexcessivecollisions_lo;
3708 estats->jabber_packets_received =
3709 old_tclient->packets_too_big_discard +
3710 estats->rx_stat_dot3statsframestoolong_lo;
3712 nstats->rx_length_errors =
3713 estats->rx_stat_etherstatsundersizepkts_lo +
3714 estats->jabber_packets_received;
3715 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3716 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3717 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3718 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3719 nstats->rx_missed_errors = estats->xxoverflow_discard;
3721 nstats->rx_errors = nstats->rx_length_errors +
3722 nstats->rx_over_errors +
3723 nstats->rx_crc_errors +
3724 nstats->rx_frame_errors +
3725 nstats->rx_fifo_errors +
3726 nstats->rx_missed_errors;
3728 nstats->tx_aborted_errors =
3729 estats->tx_stat_dot3statslatecollisions_lo +
3730 estats->tx_stat_dot3statsexcessivecollisions_lo;
3731 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3732 nstats->tx_fifo_errors = 0;
3733 nstats->tx_heartbeat_errors = 0;
3734 nstats->tx_window_errors = 0;
3736 nstats->tx_errors = nstats->tx_aborted_errors +
3737 nstats->tx_carrier_errors;
3740 static void bnx2x_stats_update(struct bnx2x *bp)
3742 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3745 if (*stats_comp != DMAE_COMP_VAL)
3749 update = (bnx2x_hw_stats_update(bp) == 0);
3751 update |= (bnx2x_storm_stats_update(bp) == 0);
3754 bnx2x_net_stats_update(bp);
3757 if (bp->stats_pending) {
3758 bp->stats_pending++;
3759 if (bp->stats_pending == 3) {
3760 BNX2X_ERR("stats not updated for 3 times\n");
3767 if (bp->msglevel & NETIF_MSG_TIMER) {
3768 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3769 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3770 struct net_device_stats *nstats = &bp->dev->stats;
3773 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3774 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3776 bnx2x_tx_avail(bp->fp),
3777 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3778 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3780 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3781 bp->fp->rx_comp_cons),
3782 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3783 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3784 netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
3785 estats->driver_xoff, estats->brb_drop_lo);
3786 printk(KERN_DEBUG "tstats: checksum_discard %u "
3787 "packets_too_big_discard %u no_buff_discard %u "
3788 "mac_discard %u mac_filter_discard %u "
3789 "xxovrflow_discard %u brb_truncate_discard %u "
3790 "ttl0_discard %u\n",
3791 old_tclient->checksum_discard,
3792 old_tclient->packets_too_big_discard,
3793 old_tclient->no_buff_discard, estats->mac_discard,
3794 estats->mac_filter_discard, estats->xxoverflow_discard,
3795 estats->brb_truncate_discard,
3796 old_tclient->ttl0_discard);
3798 for_each_queue(bp, i) {
3799 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3800 bnx2x_fp(bp, i, tx_pkt),
3801 bnx2x_fp(bp, i, rx_pkt),
3802 bnx2x_fp(bp, i, rx_calls));
3806 bnx2x_hw_stats_post(bp);
3807 bnx2x_storm_stats_post(bp);
3810 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3812 struct dmae_command *dmae;
3814 int loader_idx = PMF_DMAE_C(bp);
3815 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3817 bp->executer_idx = 0;
3819 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3821 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3823 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3825 DMAE_CMD_ENDIANITY_DW_SWAP |
3827 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3828 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3830 if (bp->port.port_stx) {
3832 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3834 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3836 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3837 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3838 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3839 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3840 dmae->dst_addr_hi = 0;
3841 dmae->len = sizeof(struct host_port_stats) >> 2;
3843 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3844 dmae->comp_addr_hi = 0;
3847 dmae->comp_addr_lo =
3848 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3849 dmae->comp_addr_hi =
3850 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3851 dmae->comp_val = DMAE_COMP_VAL;
3859 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3861 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3862 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3863 dmae->dst_addr_lo = bp->func_stx >> 2;
3864 dmae->dst_addr_hi = 0;
3865 dmae->len = sizeof(struct host_func_stats) >> 2;
3866 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3867 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3868 dmae->comp_val = DMAE_COMP_VAL;
3874 static void bnx2x_stats_stop(struct bnx2x *bp)
3878 bnx2x_stats_comp(bp);
3881 update = (bnx2x_hw_stats_update(bp) == 0);
3883 update |= (bnx2x_storm_stats_update(bp) == 0);
3886 bnx2x_net_stats_update(bp);
3889 bnx2x_port_stats_stop(bp);
3891 bnx2x_hw_stats_post(bp);
3892 bnx2x_stats_comp(bp);
3896 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3900 static const struct {
3901 void (*action)(struct bnx2x *bp);
3902 enum bnx2x_stats_state next_state;
3903 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3906 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3907 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3908 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3909 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3912 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3913 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3914 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3915 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3919 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3921 enum bnx2x_stats_state state = bp->stats_state;
3923 bnx2x_stats_stm[state][event].action(bp);
3924 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3926 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3927 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3928 state, event, bp->stats_state);
3931 static void bnx2x_timer(unsigned long data)
3933 struct bnx2x *bp = (struct bnx2x *) data;
3935 if (!netif_running(bp->dev))
3938 if (atomic_read(&bp->intr_sem) != 0)
3942 struct bnx2x_fastpath *fp = &bp->fp[0];
3945 bnx2x_tx_int(fp, 1000);
3946 rc = bnx2x_rx_int(fp, 1000);
3949 if (!BP_NOMCP(bp)) {
3950 int func = BP_FUNC(bp);
3954 ++bp->fw_drv_pulse_wr_seq;
3955 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3956 /* TBD - add SYSTEM_TIME */
3957 drv_pulse = bp->fw_drv_pulse_wr_seq;
3958 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3960 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3961 MCP_PULSE_SEQ_MASK);
3962 /* The delta between driver pulse and mcp response
3963 * should be 1 (before mcp response) or 0 (after mcp response)
3965 if ((drv_pulse != mcp_pulse) &&
3966 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3967 /* someone lost a heartbeat... */
3968 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3969 drv_pulse, mcp_pulse);
3973 if ((bp->state == BNX2X_STATE_OPEN) ||
3974 (bp->state == BNX2X_STATE_DISABLED))
3975 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3978 mod_timer(&bp->timer, jiffies + bp->current_interval);
3981 /* end of Statistics */
3986 * nic init service functions
3989 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3991 int port = BP_PORT(bp);
3993 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3994 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3995 sizeof(struct ustorm_def_status_block)/4);
3996 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3997 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3998 sizeof(struct cstorm_def_status_block)/4);
4001 static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
4002 struct host_status_block *sb, dma_addr_t mapping)
4004 int port = BP_PORT(bp);
4005 int func = BP_FUNC(bp);
4010 section = ((u64)mapping) + offsetof(struct host_status_block,
4012 sb->u_status_block.status_block_id = sb_id;
4014 REG_WR(bp, BAR_USTRORM_INTMEM +
4015 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4016 REG_WR(bp, BAR_USTRORM_INTMEM +
4017 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4019 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4020 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4022 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4023 REG_WR16(bp, BAR_USTRORM_INTMEM +
4024 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4027 section = ((u64)mapping) + offsetof(struct host_status_block,
4029 sb->c_status_block.status_block_id = sb_id;
4031 REG_WR(bp, BAR_CSTRORM_INTMEM +
4032 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4033 REG_WR(bp, BAR_CSTRORM_INTMEM +
4034 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4036 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4037 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4039 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4040 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4041 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4043 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4046 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4048 int func = BP_FUNC(bp);
4050 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4051 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052 sizeof(struct ustorm_def_status_block)/4);
4053 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4054 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4055 sizeof(struct cstorm_def_status_block)/4);
4056 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4057 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4058 sizeof(struct xstorm_def_status_block)/4);
4059 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4060 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061 sizeof(struct tstorm_def_status_block)/4);
4064 static void bnx2x_init_def_sb(struct bnx2x *bp,
4065 struct host_def_status_block *def_sb,
4066 dma_addr_t mapping, int sb_id)
4068 int port = BP_PORT(bp);
4069 int func = BP_FUNC(bp);
4070 int index, val, reg_offset;
4074 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4075 atten_status_block);
4076 def_sb->atten_status_block.status_block_id = sb_id;
4078 bp->def_att_idx = 0;
4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4084 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4085 bp->attn_group[index].sig[0] = REG_RD(bp,
4086 reg_offset + 0x10*index);
4087 bp->attn_group[index].sig[1] = REG_RD(bp,
4088 reg_offset + 0x4 + 0x10*index);
4089 bp->attn_group[index].sig[2] = REG_RD(bp,
4090 reg_offset + 0x8 + 0x10*index);
4091 bp->attn_group[index].sig[3] = REG_RD(bp,
4092 reg_offset + 0xc + 0x10*index);
4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096 HC_REG_ATTN_MSG0_ADDR_L);
4098 REG_WR(bp, reg_offset, U64_LO(section));
4099 REG_WR(bp, reg_offset + 4, U64_HI(section));
4101 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4103 val = REG_RD(bp, reg_offset);
4105 REG_WR(bp, reg_offset, val);
4108 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109 u_def_status_block);
4110 def_sb->u_def_status_block.status_block_id = sb_id;
4114 REG_WR(bp, BAR_USTRORM_INTMEM +
4115 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4116 REG_WR(bp, BAR_USTRORM_INTMEM +
4117 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4119 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4120 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4121 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4124 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4125 REG_WR16(bp, BAR_USTRORM_INTMEM +
4126 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4129 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4130 c_def_status_block);
4131 def_sb->c_def_status_block.status_block_id = sb_id;
4135 REG_WR(bp, BAR_CSTRORM_INTMEM +
4136 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4137 REG_WR(bp, BAR_CSTRORM_INTMEM +
4138 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4140 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4141 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4142 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4145 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4146 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4147 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4150 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4151 t_def_status_block);
4152 def_sb->t_def_status_block.status_block_id = sb_id;
4156 REG_WR(bp, BAR_TSTRORM_INTMEM +
4157 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4158 REG_WR(bp, BAR_TSTRORM_INTMEM +
4159 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4161 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4162 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4166 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4167 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4168 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4171 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4172 x_def_status_block);
4173 def_sb->x_def_status_block.status_block_id = sb_id;
4177 REG_WR(bp, BAR_XSTRORM_INTMEM +
4178 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4179 REG_WR(bp, BAR_XSTRORM_INTMEM +
4180 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4182 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4183 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4184 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4187 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4188 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4189 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4191 bp->stats_pending = 0;
4192 bp->set_mac_pending = 0;
4194 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4197 static void bnx2x_update_coalesce(struct bnx2x *bp)
4199 int port = BP_PORT(bp);
4202 for_each_queue(bp, i) {
4203 int sb_id = bp->fp[i].sb_id;
4205 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4206 REG_WR8(bp, BAR_USTRORM_INTMEM +
4207 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4208 HC_INDEX_U_ETH_RX_CQ_CONS),
4210 REG_WR16(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4212 HC_INDEX_U_ETH_RX_CQ_CONS),
4213 bp->rx_ticks ? 0 : 1);
4215 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4216 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4217 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4218 HC_INDEX_C_ETH_TX_CQ_CONS),
4220 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4221 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4222 HC_INDEX_C_ETH_TX_CQ_CONS),
4223 bp->tx_ticks ? 0 : 1);
4227 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4228 struct bnx2x_fastpath *fp, int last)
4232 for (i = 0; i < last; i++) {
4233 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4234 struct sk_buff *skb = rx_buf->skb;
4237 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4241 if (fp->tpa_state[i] == BNX2X_TPA_START)
4242 pci_unmap_single(bp->pdev,
4243 pci_unmap_addr(rx_buf, mapping),
4244 bp->rx_buf_use_size,
4245 PCI_DMA_FROMDEVICE);
4252 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4254 int func = BP_FUNC(bp);
4255 u16 ring_prod, cqe_ring_prod = 0;
4258 bp->rx_buf_use_size = bp->dev->mtu;
4259 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4260 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4262 if (bp->flags & TPA_ENABLE_FLAG) {
4264 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4265 bp->rx_buf_use_size, bp->rx_buf_size,
4266 bp->dev->mtu + ETH_OVREHEAD);
4268 for_each_queue(bp, j) {
4269 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4270 struct bnx2x_fastpath *fp = &bp->fp[j];
4272 fp->tpa_pool[i].skb =
4273 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4274 if (!fp->tpa_pool[i].skb) {
4275 BNX2X_ERR("Failed to allocate TPA "
4276 "skb pool for queue[%d] - "
4277 "disabling TPA on this "
4279 bnx2x_free_tpa_pool(bp, fp, i);
4280 fp->disable_tpa = 1;
4283 pci_unmap_addr_set((struct sw_rx_bd *)
4284 &bp->fp->tpa_pool[i],
4286 fp->tpa_state[i] = BNX2X_TPA_STOP;
4291 for_each_queue(bp, j) {
4292 struct bnx2x_fastpath *fp = &bp->fp[j];
4295 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4296 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4298 /* "next page" elements initialization */
4300 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4301 struct eth_rx_sge *sge;
4303 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4305 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4306 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4308 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4309 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4312 bnx2x_init_sge_ring_bit_mask(fp);
4315 for (i = 1; i <= NUM_RX_RINGS; i++) {
4316 struct eth_rx_bd *rx_bd;
4318 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4320 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4323 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4324 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4328 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4329 struct eth_rx_cqe_next_page *nextpg;
4331 nextpg = (struct eth_rx_cqe_next_page *)
4332 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4334 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4335 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4337 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4338 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4341 /* Allocate SGEs and initialize the ring elements */
4342 for (i = 0, ring_prod = 0;
4343 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4345 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4346 BNX2X_ERR("was only able to allocate "
4348 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4349 /* Cleanup already allocated elements */
4350 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4351 bnx2x_free_tpa_pool(bp, fp,
4352 ETH_MAX_AGGREGATION_QUEUES_E1H);
4353 fp->disable_tpa = 1;
4357 ring_prod = NEXT_SGE_IDX(ring_prod);
4359 fp->rx_sge_prod = ring_prod;
4361 /* Allocate BDs and initialize BD ring */
4362 fp->rx_comp_cons = 0;
4363 cqe_ring_prod = ring_prod = 0;
4364 for (i = 0; i < bp->rx_ring_size; i++) {
4365 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4366 BNX2X_ERR("was only able to allocate "
4368 bp->eth_stats.rx_skb_alloc_failed++;
4371 ring_prod = NEXT_RX_IDX(ring_prod);
4372 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4373 WARN_ON(ring_prod <= i);
4376 fp->rx_bd_prod = ring_prod;
4377 /* must not have more available CQEs than BDs */
4378 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4380 fp->rx_pkt = fp->rx_calls = 0;
4383 * this will generate an interrupt (to the TSTORM)
4384 * must only be done after chip is initialized
4386 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4391 REG_WR(bp, BAR_USTRORM_INTMEM +
4392 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4393 U64_LO(fp->rx_comp_mapping));
4394 REG_WR(bp, BAR_USTRORM_INTMEM +
4395 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4396 U64_HI(fp->rx_comp_mapping));
4400 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4404 for_each_queue(bp, j) {
4405 struct bnx2x_fastpath *fp = &bp->fp[j];
4407 for (i = 1; i <= NUM_TX_RINGS; i++) {
4408 struct eth_tx_bd *tx_bd =
4409 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4412 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4413 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4415 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4416 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4419 fp->tx_pkt_prod = 0;
4420 fp->tx_pkt_cons = 0;
4423 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4428 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4430 int func = BP_FUNC(bp);
4432 spin_lock_init(&bp->spq_lock);
4434 bp->spq_left = MAX_SPQ_PENDING;
4435 bp->spq_prod_idx = 0;
4436 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4437 bp->spq_prod_bd = bp->spq;
4438 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4440 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4441 U64_LO(bp->spq_mapping));
4443 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4444 U64_HI(bp->spq_mapping));
4446 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4450 static void bnx2x_init_context(struct bnx2x *bp)
4454 for_each_queue(bp, i) {
4455 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4456 struct bnx2x_fastpath *fp = &bp->fp[i];
4457 u8 sb_id = FP_SB_ID(fp);
4459 context->xstorm_st_context.tx_bd_page_base_hi =
4460 U64_HI(fp->tx_desc_mapping);
4461 context->xstorm_st_context.tx_bd_page_base_lo =
4462 U64_LO(fp->tx_desc_mapping);
4463 context->xstorm_st_context.db_data_addr_hi =
4464 U64_HI(fp->tx_prods_mapping);
4465 context->xstorm_st_context.db_data_addr_lo =
4466 U64_LO(fp->tx_prods_mapping);
4467 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4468 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4470 context->ustorm_st_context.common.sb_index_numbers =
4471 BNX2X_RX_SB_INDEX_NUM;
4472 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4473 context->ustorm_st_context.common.status_block_id = sb_id;
4474 context->ustorm_st_context.common.flags =
4475 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4476 context->ustorm_st_context.common.mc_alignment_size = 64;
4477 context->ustorm_st_context.common.bd_buff_size =
4478 bp->rx_buf_use_size;
4479 context->ustorm_st_context.common.bd_page_base_hi =
4480 U64_HI(fp->rx_desc_mapping);
4481 context->ustorm_st_context.common.bd_page_base_lo =
4482 U64_LO(fp->rx_desc_mapping);
4483 if (!fp->disable_tpa) {
4484 context->ustorm_st_context.common.flags |=
4485 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4486 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4487 context->ustorm_st_context.common.sge_buff_size =
4488 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4489 context->ustorm_st_context.common.sge_page_base_hi =
4490 U64_HI(fp->rx_sge_mapping);
4491 context->ustorm_st_context.common.sge_page_base_lo =
4492 U64_LO(fp->rx_sge_mapping);
4495 context->cstorm_st_context.sb_index_number =
4496 HC_INDEX_C_ETH_TX_CQ_CONS;
4497 context->cstorm_st_context.status_block_id = sb_id;
4499 context->xstorm_ag_context.cdu_reserved =
4500 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4501 CDU_REGION_NUMBER_XCM_AG,
4502 ETH_CONNECTION_TYPE);
4503 context->ustorm_ag_context.cdu_usage =
4504 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4505 CDU_REGION_NUMBER_UCM_AG,
4506 ETH_CONNECTION_TYPE);
4510 static void bnx2x_init_ind_table(struct bnx2x *bp)
4512 int port = BP_PORT(bp);
4518 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4519 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4520 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4521 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4522 i % bp->num_queues);
4524 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4527 static void bnx2x_set_client_config(struct bnx2x *bp)
4529 struct tstorm_eth_client_config tstorm_client = {0};
4530 int port = BP_PORT(bp);
4533 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4534 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4535 tstorm_client.config_flags =
4536 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4538 if (bp->rx_mode && bp->vlgrp) {
4539 tstorm_client.config_flags |=
4540 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4541 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4545 if (bp->flags & TPA_ENABLE_FLAG) {
4546 tstorm_client.max_sges_for_packet =
4547 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4548 tstorm_client.max_sges_for_packet =
4549 ((tstorm_client.max_sges_for_packet +
4550 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4551 PAGES_PER_SGE_SHIFT;
4553 tstorm_client.config_flags |=
4554 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4557 for_each_queue(bp, i) {
4558 REG_WR(bp, BAR_TSTRORM_INTMEM +
4559 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4560 ((u32 *)&tstorm_client)[0]);
4561 REG_WR(bp, BAR_TSTRORM_INTMEM +
4562 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4563 ((u32 *)&tstorm_client)[1]);
4566 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4567 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4570 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4572 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4573 int mode = bp->rx_mode;
4574 int mask = (1 << BP_L_ID(bp));
4575 int func = BP_FUNC(bp);
4578 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4581 case BNX2X_RX_MODE_NONE: /* no Rx */
4582 tstorm_mac_filter.ucast_drop_all = mask;
4583 tstorm_mac_filter.mcast_drop_all = mask;
4584 tstorm_mac_filter.bcast_drop_all = mask;
4586 case BNX2X_RX_MODE_NORMAL:
4587 tstorm_mac_filter.bcast_accept_all = mask;
4589 case BNX2X_RX_MODE_ALLMULTI:
4590 tstorm_mac_filter.mcast_accept_all = mask;
4591 tstorm_mac_filter.bcast_accept_all = mask;
4593 case BNX2X_RX_MODE_PROMISC:
4594 tstorm_mac_filter.ucast_accept_all = mask;
4595 tstorm_mac_filter.mcast_accept_all = mask;
4596 tstorm_mac_filter.bcast_accept_all = mask;
4599 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4603 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4604 REG_WR(bp, BAR_TSTRORM_INTMEM +
4605 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4606 ((u32 *)&tstorm_mac_filter)[i]);
4608 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4609 ((u32 *)&tstorm_mac_filter)[i]); */
4612 if (mode != BNX2X_RX_MODE_NONE)
4613 bnx2x_set_client_config(bp);
4616 static void bnx2x_init_internal_common(struct bnx2x *bp)
4620 /* Zero this manually as its initialization is
4621 currently missing in the initTool */
4622 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4623 REG_WR(bp, BAR_USTRORM_INTMEM +
4624 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4627 static void bnx2x_init_internal_port(struct bnx2x *bp)
4629 int port = BP_PORT(bp);
4631 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4634 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4637 static void bnx2x_init_internal_func(struct bnx2x *bp)
4639 struct tstorm_eth_function_common_config tstorm_config = {0};
4640 struct stats_indication_flags stats_flags = {0};
4641 int port = BP_PORT(bp);
4642 int func = BP_FUNC(bp);
4647 tstorm_config.config_flags = MULTI_FLAGS;
4648 tstorm_config.rss_result_mask = MULTI_MASK;
4651 tstorm_config.leading_client_id = BP_L_ID(bp);
4653 REG_WR(bp, BAR_TSTRORM_INTMEM +
4654 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4655 (*(u32 *)&tstorm_config));
4657 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4658 bnx2x_set_storm_rx_mode(bp);
4660 /* reset xstorm per client statistics */
4661 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4662 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4666 /* reset tstorm per client statistics */
4667 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4668 REG_WR(bp, BAR_TSTRORM_INTMEM +
4669 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4673 /* Init statistics related context */
4674 stats_flags.collect_eth = 1;
4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4677 ((u32 *)&stats_flags)[0]);
4678 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4679 ((u32 *)&stats_flags)[1]);
4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4682 ((u32 *)&stats_flags)[0]);
4683 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4684 ((u32 *)&stats_flags)[1]);
4686 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4687 ((u32 *)&stats_flags)[0]);
4688 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4689 ((u32 *)&stats_flags)[1]);
4691 REG_WR(bp, BAR_XSTRORM_INTMEM +
4692 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4693 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4694 REG_WR(bp, BAR_XSTRORM_INTMEM +
4695 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4696 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4698 REG_WR(bp, BAR_TSTRORM_INTMEM +
4699 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4700 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4701 REG_WR(bp, BAR_TSTRORM_INTMEM +
4702 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4703 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4705 if (CHIP_IS_E1H(bp)) {
4706 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4708 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4710 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4712 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4715 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4719 /* Init CQ ring mapping and aggregation size */
4720 max_agg_size = min((u32)(bp->rx_buf_use_size +
4721 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4723 for_each_queue(bp, i) {
4724 struct bnx2x_fastpath *fp = &bp->fp[i];
4726 REG_WR(bp, BAR_USTRORM_INTMEM +
4727 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4728 U64_LO(fp->rx_comp_mapping));
4729 REG_WR(bp, BAR_USTRORM_INTMEM +
4730 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4731 U64_HI(fp->rx_comp_mapping));
4733 REG_WR16(bp, BAR_USTRORM_INTMEM +
4734 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4739 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4741 switch (load_code) {
4742 case FW_MSG_CODE_DRV_LOAD_COMMON:
4743 bnx2x_init_internal_common(bp);
4746 case FW_MSG_CODE_DRV_LOAD_PORT:
4747 bnx2x_init_internal_port(bp);
4750 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4751 bnx2x_init_internal_func(bp);
4755 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4760 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4764 for_each_queue(bp, i) {
4765 struct bnx2x_fastpath *fp = &bp->fp[i];
4768 fp->state = BNX2X_FP_STATE_CLOSED;
4770 fp->cl_id = BP_L_ID(bp) + i;
4771 fp->sb_id = fp->cl_id;
4773 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4774 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4775 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4776 fp->status_blk_mapping);
4779 bnx2x_init_def_sb(bp, bp->def_status_blk,
4780 bp->def_status_blk_mapping, DEF_SB_ID);
4781 bnx2x_update_coalesce(bp);
4782 bnx2x_init_rx_rings(bp);
4783 bnx2x_init_tx_ring(bp);
4784 bnx2x_init_sp_ring(bp);
4785 bnx2x_init_context(bp);
4786 bnx2x_init_internal(bp, load_code);
4787 bnx2x_init_ind_table(bp);
4788 bnx2x_int_enable(bp);
4791 /* end of nic init */
4794 * gzip service functions
4797 static int bnx2x_gunzip_init(struct bnx2x *bp)
4799 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4800 &bp->gunzip_mapping);
4801 if (bp->gunzip_buf == NULL)
4804 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4805 if (bp->strm == NULL)
4808 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4810 if (bp->strm->workspace == NULL)
4820 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4821 bp->gunzip_mapping);
4822 bp->gunzip_buf = NULL;
4825 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4826 " un-compression\n", bp->dev->name);
4830 static void bnx2x_gunzip_end(struct bnx2x *bp)
4832 kfree(bp->strm->workspace);
4837 if (bp->gunzip_buf) {
4838 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4839 bp->gunzip_mapping);
4840 bp->gunzip_buf = NULL;
4844 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4848 /* check gzip header */
4849 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4856 if (zbuf[3] & FNAME)
4857 while ((zbuf[n++] != 0) && (n < len));
4859 bp->strm->next_in = zbuf + n;
4860 bp->strm->avail_in = len - n;
4861 bp->strm->next_out = bp->gunzip_buf;
4862 bp->strm->avail_out = FW_BUF_SIZE;
4864 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4868 rc = zlib_inflate(bp->strm, Z_FINISH);
4869 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4870 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4871 bp->dev->name, bp->strm->msg);
4873 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4874 if (bp->gunzip_outlen & 0x3)
4875 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4876 " gunzip_outlen (%d) not aligned\n",
4877 bp->dev->name, bp->gunzip_outlen);
4878 bp->gunzip_outlen >>= 2;
4880 zlib_inflateEnd(bp->strm);
4882 if (rc == Z_STREAM_END)
4888 /* nic load/unload */
4891 * General service functions
4894 /* send a NIG loopback debug packet */
4895 static void bnx2x_lb_pckt(struct bnx2x *bp)
4899 /* Ethernet source and destination addresses */
4900 wb_write[0] = 0x55555555;
4901 wb_write[1] = 0x55555555;
4902 wb_write[2] = 0x20; /* SOP */
4903 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4905 /* NON-IP protocol */
4906 wb_write[0] = 0x09000000;
4907 wb_write[1] = 0x55555555;
4908 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4909 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4912 /* some of the internal memories
4913 * are not directly readable from the driver
4914 * to test them we send debug packets
4916 static int bnx2x_int_mem_test(struct bnx2x *bp)
4922 if (CHIP_REV_IS_FPGA(bp))
4924 else if (CHIP_REV_IS_EMUL(bp))
4929 DP(NETIF_MSG_HW, "start part1\n");
4931 /* Disable inputs of parser neighbor blocks */
4932 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4933 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4934 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4935 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4937 /* Write 0 to parser credits for CFC search request */
4938 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4940 /* send Ethernet packet */
4943 /* TODO do i reset NIG statistic? */
4944 /* Wait until NIG register shows 1 packet of size 0x10 */
4945 count = 1000 * factor;
4948 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4949 val = *bnx2x_sp(bp, wb_data[0]);
4957 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4961 /* Wait until PRS register shows 1 packet */
4962 count = 1000 * factor;
4964 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4972 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4976 /* Reset and init BRB, PRS */
4977 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4979 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4981 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4982 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4984 DP(NETIF_MSG_HW, "part2\n");
4986 /* Disable inputs of parser neighbor blocks */
4987 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4990 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4992 /* Write 0 to parser credits for CFC search request */
4993 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4995 /* send 10 Ethernet packets */
4996 for (i = 0; i < 10; i++)
4999 /* Wait until NIG register shows 10 + 1
5000 packets of size 11*0x10 = 0xb0 */
5001 count = 1000 * factor;
5004 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5005 val = *bnx2x_sp(bp, wb_data[0]);
5013 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5017 /* Wait until PRS register shows 2 packets */
5018 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5022 /* Write 1 to parser credits for CFC search request */
5023 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5025 /* Wait until PRS register shows 3 packets */
5026 msleep(10 * factor);
5027 /* Wait until NIG register shows 1 packet of size 0x10 */
5028 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5030 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5032 /* clear NIG EOP FIFO */
5033 for (i = 0; i < 11; i++)
5034 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5035 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5037 BNX2X_ERR("clear of NIG failed\n");
5041 /* Reset and init BRB, PRS, NIG */
5042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5044 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5046 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5047 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5050 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5053 /* Enable inputs of parser neighbor blocks */
5054 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5055 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5056 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5057 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5059 DP(NETIF_MSG_HW, "done\n");
5064 static void enable_blocks_attention(struct bnx2x *bp)
5066 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5067 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5068 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5069 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5070 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5071 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5072 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5073 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5074 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5075 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5076 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5077 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5078 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5079 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5080 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5081 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5082 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5083 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5084 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5085 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5086 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5087 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5088 if (CHIP_REV_IS_FPGA(bp))
5089 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5091 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5092 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5093 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5094 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5095 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5096 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5097 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5098 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5099 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5100 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5104 static int bnx2x_init_common(struct bnx2x *bp)
5108 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5110 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5111 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5113 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5114 if (CHIP_IS_E1H(bp))
5115 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5117 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5119 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5121 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5122 if (CHIP_IS_E1(bp)) {
5123 /* enable HW interrupt from PXP on USDM overflow
5124 bit 16 on INT_MASK_0 */
5125 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5128 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5132 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5133 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5134 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5135 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5139 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5140 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5141 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5142 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5143 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5148 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5151 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5153 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5154 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5155 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5158 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5159 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5161 /* let the HW do it's magic ... */
5163 /* finish PXP init */
5164 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5166 BNX2X_ERR("PXP2 CFG failed\n");
5169 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5171 BNX2X_ERR("PXP2 RD_INIT failed\n");
5175 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5176 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5178 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5180 /* clean the DMAE memory */
5182 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5184 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5185 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5186 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5187 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5189 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5190 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5191 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5192 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5194 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5195 /* soft reset pulse */
5196 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5197 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5200 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5203 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5204 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5205 if (!CHIP_REV_IS_SLOW(bp)) {
5206 /* enable hw interrupt from doorbell Q */
5207 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5210 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5211 if (CHIP_REV_IS_SLOW(bp)) {
5212 /* fix for emulation and FPGA for no pause */
5213 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5214 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5215 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5216 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5219 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5220 if (CHIP_IS_E1H(bp))
5221 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5223 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5224 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5225 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5226 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5228 if (CHIP_IS_E1H(bp)) {
5229 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5230 STORM_INTMEM_SIZE_E1H/2);
5232 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5233 0, STORM_INTMEM_SIZE_E1H/2);
5234 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5235 STORM_INTMEM_SIZE_E1H/2);
5237 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5238 0, STORM_INTMEM_SIZE_E1H/2);
5239 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5240 STORM_INTMEM_SIZE_E1H/2);
5242 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5243 0, STORM_INTMEM_SIZE_E1H/2);
5244 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1H/2);
5247 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5248 0, STORM_INTMEM_SIZE_E1H/2);
5250 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1);
5252 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5253 STORM_INTMEM_SIZE_E1);
5254 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1);
5256 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5257 STORM_INTMEM_SIZE_E1);
5260 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5261 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5262 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5263 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5266 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5268 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5271 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5272 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5273 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5275 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5276 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5277 REG_WR(bp, i, 0xc0cac01a);
5278 /* TODO: replace with something meaningful */
5280 if (CHIP_IS_E1H(bp))
5281 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5282 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5284 if (sizeof(union cdu_context) != 1024)
5285 /* we currently assume that a context is 1024 bytes */
5286 printk(KERN_ALERT PFX "please adjust the size of"
5287 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5289 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5290 val = (4 << 24) + (0 << 12) + 1024;
5291 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5292 if (CHIP_IS_E1(bp)) {
5293 /* !!! fix pxp client crdit until excel update */
5294 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5295 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5298 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5299 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5301 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5302 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5304 /* PXPCS COMMON comes here */
5305 /* Reset PCIE errors for debug */
5306 REG_WR(bp, 0x2814, 0xffffffff);
5307 REG_WR(bp, 0x3820, 0xffffffff);
5309 /* EMAC0 COMMON comes here */
5310 /* EMAC1 COMMON comes here */
5311 /* DBU COMMON comes here */
5312 /* DBG COMMON comes here */
5314 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5315 if (CHIP_IS_E1H(bp)) {
5316 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5317 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5320 if (CHIP_REV_IS_SLOW(bp))
5323 /* finish CFC init */
5324 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5326 BNX2X_ERR("CFC LL_INIT failed\n");
5329 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5331 BNX2X_ERR("CFC AC_INIT failed\n");
5334 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5336 BNX2X_ERR("CFC CAM_INIT failed\n");
5339 REG_WR(bp, CFC_REG_DEBUG0, 0);
5341 /* read NIG statistic
5342 to see if this is our first up since powerup */
5343 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5344 val = *bnx2x_sp(bp, wb_data[0]);
5346 /* do internal memory self test */
5347 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5348 BNX2X_ERR("internal mem self test failed\n");
5352 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5354 /* Fan failure is indicated by SPIO 5 */
5355 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5356 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5358 /* set to active low mode */
5359 val = REG_RD(bp, MISC_REG_SPIO_INT);
5360 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5361 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5362 REG_WR(bp, MISC_REG_SPIO_INT, val);
5364 /* enable interrupt to signal the IGU */
5365 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5366 val |= (1 << MISC_REGISTERS_SPIO_5);
5367 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5374 /* clear PXP2 attentions */
5375 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5377 enable_blocks_attention(bp);
5379 if (bp->flags & TPA_ENABLE_FLAG) {
5380 struct tstorm_eth_tpa_exist tmp = {0};
5384 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5386 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5393 static int bnx2x_init_port(struct bnx2x *bp)
5395 int port = BP_PORT(bp);
5398 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5400 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5402 /* Port PXP comes here */
5403 /* Port PXP2 comes here */
5408 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5409 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5410 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5411 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5416 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5417 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5418 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5419 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5424 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5425 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5426 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5427 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5429 /* Port CMs come here */
5431 /* Port QM comes here */
5433 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5434 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5436 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5437 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5439 /* Port DQ comes here */
5440 /* Port BRB1 comes here */
5441 /* Port PRS comes here */
5442 /* Port TSDM comes here */
5443 /* Port CSDM comes here */
5444 /* Port USDM comes here */
5445 /* Port XSDM comes here */
5446 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5447 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5448 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5449 port ? USEM_PORT1_END : USEM_PORT0_END);
5450 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5451 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5452 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5453 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5454 /* Port UPB comes here */
5455 /* Port XPB comes here */
5457 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5458 port ? PBF_PORT1_END : PBF_PORT0_END);
5460 /* configure PBF to work without PAUSE mtu 9000 */
5461 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5463 /* update threshold */
5464 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5465 /* update init credit */
5466 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5469 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5471 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5474 /* tell the searcher where the T2 table is */
5475 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5477 wb_write[0] = U64_LO(bp->t2_mapping);
5478 wb_write[1] = U64_HI(bp->t2_mapping);
5479 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5480 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5481 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5482 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5484 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5485 /* Port SRCH comes here */
5487 /* Port CDU comes here */
5488 /* Port CFC comes here */
5490 if (CHIP_IS_E1(bp)) {
5491 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5492 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5494 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5495 port ? HC_PORT1_END : HC_PORT0_END);
5497 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5498 MISC_AEU_PORT0_START,
5499 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5500 /* init aeu_mask_attn_func_0/1:
5501 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5502 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5503 * bits 4-7 are used for "per vn group attention" */
5504 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5505 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5507 /* Port PXPCS comes here */
5508 /* Port EMAC0 comes here */
5509 /* Port EMAC1 comes here */
5510 /* Port DBU comes here */
5511 /* Port DBG comes here */
5512 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5513 port ? NIG_PORT1_END : NIG_PORT0_END);
5515 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5517 if (CHIP_IS_E1H(bp)) {
5519 struct cmng_struct_per_port m_cmng_port;
5522 /* 0x2 disable e1hov, 0x1 enable */
5523 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5524 (IS_E1HMF(bp) ? 0x1 : 0x2));
5526 /* Init RATE SHAPING and FAIRNESS contexts.
5527 Initialize as if there is 10G link. */
5528 wsum = bnx2x_calc_vn_wsum(bp);
5529 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5531 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5532 bnx2x_init_vn_minmax(bp, 2*vn + port,
5533 wsum, 10000, &m_cmng_port);
5536 /* Port MCP comes here */
5537 /* Port DMAE comes here */
5539 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5540 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5541 /* add SPIO 5 to group 0 */
5542 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5543 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5544 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5551 bnx2x__link_reset(bp);
5556 #define ILT_PER_FUNC (768/2)
5557 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5558 /* the phys address is shifted right 12 bits and has an added
5559 1=valid bit added to the 53rd bit
5560 then since this is a wide register(TM)
5561 we split it into two 32 bit writes
5563 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5564 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5565 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5566 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5568 #define CNIC_ILT_LINES 0
5570 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5574 if (CHIP_IS_E1H(bp))
5575 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5577 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5579 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5582 static int bnx2x_init_func(struct bnx2x *bp)
5584 int port = BP_PORT(bp);
5585 int func = BP_FUNC(bp);
5588 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5590 i = FUNC_ILT_BASE(func);
5592 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5593 if (CHIP_IS_E1H(bp)) {
5594 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5595 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5597 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5598 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5601 if (CHIP_IS_E1H(bp)) {
5602 for (i = 0; i < 9; i++)
5603 bnx2x_init_block(bp,
5604 cm_start[func][i], cm_end[func][i]);
5606 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5607 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5610 /* HC init per function */
5611 if (CHIP_IS_E1H(bp)) {
5612 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5614 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5615 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5617 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5619 if (CHIP_IS_E1H(bp))
5620 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5622 /* Reset PCIE errors for debug */
5623 REG_WR(bp, 0x2114, 0xffffffff);
5624 REG_WR(bp, 0x2120, 0xffffffff);
5629 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5633 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5634 BP_FUNC(bp), load_code);
5637 mutex_init(&bp->dmae_mutex);
5638 bnx2x_gunzip_init(bp);
5640 switch (load_code) {
5641 case FW_MSG_CODE_DRV_LOAD_COMMON:
5642 rc = bnx2x_init_common(bp);
5647 case FW_MSG_CODE_DRV_LOAD_PORT:
5649 rc = bnx2x_init_port(bp);
5654 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5656 rc = bnx2x_init_func(bp);
5662 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5666 if (!BP_NOMCP(bp)) {
5667 int func = BP_FUNC(bp);
5669 bp->fw_drv_pulse_wr_seq =
5670 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5671 DRV_PULSE_SEQ_MASK);
5672 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5673 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5674 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5678 /* this needs to be done before gunzip end */
5679 bnx2x_zero_def_sb(bp);
5680 for_each_queue(bp, i)
5681 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5684 bnx2x_gunzip_end(bp);
5689 /* send the MCP a request, block until there is a reply */
5690 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5692 int func = BP_FUNC(bp);
5693 u32 seq = ++bp->fw_seq;
5696 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5698 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5699 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5702 /* let the FW do it's magic ... */
5705 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5707 /* Give the FW up to 2 second (200*10ms) */
5708 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5710 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5711 cnt*delay, rc, seq);
5713 /* is this a reply to our command? */
5714 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5715 rc &= FW_MSG_CODE_MASK;
5719 BNX2X_ERR("FW failed to respond!\n");
5727 static void bnx2x_free_mem(struct bnx2x *bp)
5730 #define BNX2X_PCI_FREE(x, y, size) \
5733 pci_free_consistent(bp->pdev, size, x, y); \
5739 #define BNX2X_FREE(x) \
5750 for_each_queue(bp, i) {
5753 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5754 bnx2x_fp(bp, i, status_blk_mapping),
5755 sizeof(struct host_status_block) +
5756 sizeof(struct eth_tx_db_data));
5758 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5759 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5760 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5761 bnx2x_fp(bp, i, tx_desc_mapping),
5762 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5764 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5765 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5766 bnx2x_fp(bp, i, rx_desc_mapping),
5767 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5769 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5770 bnx2x_fp(bp, i, rx_comp_mapping),
5771 sizeof(struct eth_fast_path_rx_cqe) *
5775 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5776 bnx2x_fp(bp, i, rx_sge_mapping),
5777 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5779 /* end of fastpath */
5781 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5782 sizeof(struct host_def_status_block));
5784 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5785 sizeof(struct bnx2x_slowpath));
5788 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5789 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5790 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5791 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5793 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5795 #undef BNX2X_PCI_FREE
5799 static int bnx2x_alloc_mem(struct bnx2x *bp)
5802 #define BNX2X_PCI_ALLOC(x, y, size) \
5804 x = pci_alloc_consistent(bp->pdev, size, y); \
5806 goto alloc_mem_err; \
5807 memset(x, 0, size); \
5810 #define BNX2X_ALLOC(x, size) \
5812 x = vmalloc(size); \
5814 goto alloc_mem_err; \
5815 memset(x, 0, size); \
5821 for_each_queue(bp, i) {
5822 bnx2x_fp(bp, i, bp) = bp;
5825 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5826 &bnx2x_fp(bp, i, status_blk_mapping),
5827 sizeof(struct host_status_block) +
5828 sizeof(struct eth_tx_db_data));
5830 bnx2x_fp(bp, i, hw_tx_prods) =
5831 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5833 bnx2x_fp(bp, i, tx_prods_mapping) =
5834 bnx2x_fp(bp, i, status_blk_mapping) +
5835 sizeof(struct host_status_block);
5837 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5838 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5839 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5840 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5841 &bnx2x_fp(bp, i, tx_desc_mapping),
5842 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5844 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5845 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5846 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5847 &bnx2x_fp(bp, i, rx_desc_mapping),
5848 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5850 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5851 &bnx2x_fp(bp, i, rx_comp_mapping),
5852 sizeof(struct eth_fast_path_rx_cqe) *
5856 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5857 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5858 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5859 &bnx2x_fp(bp, i, rx_sge_mapping),
5860 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5862 /* end of fastpath */
5864 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5865 sizeof(struct host_def_status_block));
5867 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5868 sizeof(struct bnx2x_slowpath));
5871 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5874 for (i = 0; i < 64*1024; i += 64) {
5875 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5876 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5879 /* allocate searcher T2 table
5880 we allocate 1/4 of alloc num for T2
5881 (which is not entered into the ILT) */
5882 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5885 for (i = 0; i < 16*1024; i += 64)
5886 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5888 /* now fixup the last line in the block to point to the next block */
5889 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5891 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5892 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5894 /* QM queues (128*MAX_CONN) */
5895 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5898 /* Slow path ring */
5899 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5907 #undef BNX2X_PCI_ALLOC
5911 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5915 for_each_queue(bp, i) {
5916 struct bnx2x_fastpath *fp = &bp->fp[i];
5918 u16 bd_cons = fp->tx_bd_cons;
5919 u16 sw_prod = fp->tx_pkt_prod;
5920 u16 sw_cons = fp->tx_pkt_cons;
5922 while (sw_cons != sw_prod) {
5923 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5929 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5933 for_each_queue(bp, j) {
5934 struct bnx2x_fastpath *fp = &bp->fp[j];
5936 for (i = 0; i < NUM_RX_BD; i++) {
5937 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5938 struct sk_buff *skb = rx_buf->skb;
5943 pci_unmap_single(bp->pdev,
5944 pci_unmap_addr(rx_buf, mapping),
5945 bp->rx_buf_use_size,
5946 PCI_DMA_FROMDEVICE);
5951 if (!fp->disable_tpa)
5952 bnx2x_free_tpa_pool(bp, fp,
5953 ETH_MAX_AGGREGATION_QUEUES_E1H);
5957 static void bnx2x_free_skbs(struct bnx2x *bp)
5959 bnx2x_free_tx_skbs(bp);
5960 bnx2x_free_rx_skbs(bp);
5963 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5967 free_irq(bp->msix_table[0].vector, bp->dev);
5968 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5969 bp->msix_table[0].vector);
5971 for_each_queue(bp, i) {
5972 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5973 "state %x\n", i, bp->msix_table[i + offset].vector,
5974 bnx2x_fp(bp, i, state));
5976 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5977 BNX2X_ERR("IRQ of fp #%d being freed while "
5978 "state != closed\n", i);
5980 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5984 static void bnx2x_free_irq(struct bnx2x *bp)
5986 if (bp->flags & USING_MSIX_FLAG) {
5987 bnx2x_free_msix_irqs(bp);
5988 pci_disable_msix(bp->pdev);
5989 bp->flags &= ~USING_MSIX_FLAG;
5992 free_irq(bp->pdev->irq, bp->dev);
5995 static int bnx2x_enable_msix(struct bnx2x *bp)
5999 bp->msix_table[0].entry = 0;
6001 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6003 for_each_queue(bp, i) {
6004 int igu_vec = offset + i + BP_L_ID(bp);
6006 bp->msix_table[i + offset].entry = igu_vec;
6007 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6008 "(fastpath #%u)\n", i + offset, igu_vec, i);
6011 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6012 bp->num_queues + offset);
6014 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6017 bp->flags |= USING_MSIX_FLAG;
6022 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6024 int i, rc, offset = 1;
6026 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6027 bp->dev->name, bp->dev);
6029 BNX2X_ERR("request sp irq failed\n");
6033 for_each_queue(bp, i) {
6034 rc = request_irq(bp->msix_table[i + offset].vector,
6035 bnx2x_msix_fp_int, 0,
6036 bp->dev->name, &bp->fp[i]);
6038 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6040 bnx2x_free_msix_irqs(bp);
6044 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6050 static int bnx2x_req_irq(struct bnx2x *bp)
6054 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6055 bp->dev->name, bp->dev);
6057 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6063 * Init service functions
6066 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6068 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6069 int port = BP_PORT(bp);
6072 * unicasts 0-31:port0 32-63:port1
6073 * multicast 64-127:port0 128-191:port1
6075 config->hdr.length_6b = 2;
6076 config->hdr.offset = port ? 31 : 0;
6077 config->hdr.client_id = BP_CL_ID(bp);
6078 config->hdr.reserved1 = 0;
6081 config->config_table[0].cam_entry.msb_mac_addr =
6082 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6083 config->config_table[0].cam_entry.middle_mac_addr =
6084 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6085 config->config_table[0].cam_entry.lsb_mac_addr =
6086 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6087 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6088 config->config_table[0].target_table_entry.flags = 0;
6089 config->config_table[0].target_table_entry.client_id = 0;
6090 config->config_table[0].target_table_entry.vlan_id = 0;
6092 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6093 config->config_table[0].cam_entry.msb_mac_addr,
6094 config->config_table[0].cam_entry.middle_mac_addr,
6095 config->config_table[0].cam_entry.lsb_mac_addr);
6098 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6099 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6100 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6101 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6102 config->config_table[1].target_table_entry.flags =
6103 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6104 config->config_table[1].target_table_entry.client_id = 0;
6105 config->config_table[1].target_table_entry.vlan_id = 0;
6107 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6108 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6109 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6112 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6114 struct mac_configuration_cmd_e1h *config =
6115 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6117 if (bp->state != BNX2X_STATE_OPEN) {
6118 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6122 /* CAM allocation for E1H
6123 * unicasts: by func number
6124 * multicast: 20+FUNC*20, 20 each
6126 config->hdr.length_6b = 1;
6127 config->hdr.offset = BP_FUNC(bp);
6128 config->hdr.client_id = BP_CL_ID(bp);
6129 config->hdr.reserved1 = 0;
6132 config->config_table[0].msb_mac_addr =
6133 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6134 config->config_table[0].middle_mac_addr =
6135 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6136 config->config_table[0].lsb_mac_addr =
6137 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6138 config->config_table[0].client_id = BP_L_ID(bp);
6139 config->config_table[0].vlan_id = 0;
6140 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6141 config->config_table[0].flags = BP_PORT(bp);
6143 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6144 config->config_table[0].msb_mac_addr,
6145 config->config_table[0].middle_mac_addr,
6146 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6148 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6149 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6150 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6153 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6154 int *state_p, int poll)
6156 /* can take a while if any port is running */
6159 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6160 poll ? "polling" : "waiting", state, idx);
6165 bnx2x_rx_int(bp->fp, 10);
6166 /* if index is different from 0
6167 * the reply for some commands will
6168 * be on the none default queue
6171 bnx2x_rx_int(&bp->fp[idx], 10);
6173 mb(); /* state is changed by bnx2x_sp_event() */
6175 if (*state_p == state)
6182 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6183 poll ? "polling" : "waiting", state, idx);
6184 #ifdef BNX2X_STOP_ON_ERROR
6191 static int bnx2x_setup_leading(struct bnx2x *bp)
6195 /* reset IGU state */
6196 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6199 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6201 /* Wait for completion */
6202 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6207 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6209 /* reset IGU state */
6210 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6213 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6214 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6216 /* Wait for completion */
6217 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6218 &(bp->fp[index].state), 0);
6221 static int bnx2x_poll(struct napi_struct *napi, int budget);
6222 static void bnx2x_set_rx_mode(struct net_device *dev);
6224 /* must be called with rtnl_lock */
6225 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6230 #ifdef BNX2X_STOP_ON_ERROR
6231 if (unlikely(bp->panic))
6235 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6237 /* Send LOAD_REQUEST command to MCP
6238 Returns the type of LOAD command:
6239 if it is the first port to be initialized
6240 common blocks should be initialized, otherwise - not
6242 if (!BP_NOMCP(bp)) {
6243 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6245 BNX2X_ERR("MCP response failure, aborting\n");
6248 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6249 return -EBUSY; /* other port in diagnostic mode */
6252 int port = BP_PORT(bp);
6254 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6255 load_count[0], load_count[1], load_count[2]);
6257 load_count[1 + port]++;
6258 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6259 load_count[0], load_count[1], load_count[2]);
6260 if (load_count[0] == 1)
6261 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6262 else if (load_count[1 + port] == 1)
6263 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6265 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6268 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6269 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6273 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6275 /* if we can't use MSI-X we only need one fp,
6276 * so try to enable MSI-X with the requested number of fp's
6277 * and fallback to inta with one fp
6283 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6284 /* user requested number */
6285 bp->num_queues = use_multi;
6288 bp->num_queues = min_t(u32, num_online_cpus(),
6293 if (bnx2x_enable_msix(bp)) {
6294 /* failed to enable MSI-X */
6297 BNX2X_ERR("Multi requested but failed"
6298 " to enable MSI-X\n");
6302 "set number of queues to %d\n", bp->num_queues);
6304 if (bnx2x_alloc_mem(bp))
6307 for_each_queue(bp, i)
6308 bnx2x_fp(bp, i, disable_tpa) =
6309 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6311 if (bp->flags & USING_MSIX_FLAG) {
6312 rc = bnx2x_req_msix_irqs(bp);
6314 pci_disable_msix(bp->pdev);
6319 rc = bnx2x_req_irq(bp);
6321 BNX2X_ERR("IRQ request failed, aborting\n");
6326 for_each_queue(bp, i)
6327 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6331 rc = bnx2x_init_hw(bp, load_code);
6333 BNX2X_ERR("HW init failed, aborting\n");
6337 /* Setup NIC internals and enable interrupts */
6338 bnx2x_nic_init(bp, load_code);
6340 /* Send LOAD_DONE command to MCP */
6341 if (!BP_NOMCP(bp)) {
6342 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6344 BNX2X_ERR("MCP response failure, aborting\n");
6346 goto load_int_disable;
6350 bnx2x_stats_init(bp);
6352 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6354 /* Enable Rx interrupt handling before sending the ramrod
6355 as it's completed on Rx FP queue */
6356 for_each_queue(bp, i)
6357 napi_enable(&bnx2x_fp(bp, i, napi));
6359 /* Enable interrupt handling */
6360 atomic_set(&bp->intr_sem, 0);
6362 rc = bnx2x_setup_leading(bp);
6364 BNX2X_ERR("Setup leading failed!\n");
6365 goto load_stop_netif;
6368 if (CHIP_IS_E1H(bp))
6369 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6370 BNX2X_ERR("!!! mf_cfg function disabled\n");
6371 bp->state = BNX2X_STATE_DISABLED;
6374 if (bp->state == BNX2X_STATE_OPEN)
6375 for_each_nondefault_queue(bp, i) {
6376 rc = bnx2x_setup_multi(bp, i);
6378 goto load_stop_netif;
6382 bnx2x_set_mac_addr_e1(bp);
6384 bnx2x_set_mac_addr_e1h(bp);
6387 bnx2x_initial_phy_init(bp);
6389 /* Start fast path */
6390 switch (load_mode) {
6392 /* Tx queue should only be re-enabled */
6393 netif_wake_queue(bp->dev);
6394 bnx2x_set_rx_mode(bp->dev);
6398 netif_start_queue(bp->dev);
6399 bnx2x_set_rx_mode(bp->dev);
6400 if (bp->flags & USING_MSIX_FLAG)
6401 printk(KERN_INFO PFX "%s: using MSI-X\n",
6406 bnx2x_set_rx_mode(bp->dev);
6407 bp->state = BNX2X_STATE_DIAG;
6415 bnx2x__link_status_update(bp);
6417 /* start the timer */
6418 mod_timer(&bp->timer, jiffies + bp->current_interval);
6424 for_each_queue(bp, i)
6425 napi_disable(&bnx2x_fp(bp, i, napi));
6428 bnx2x_int_disable_sync(bp);
6433 /* Free SKBs, SGEs, TPA pool and driver internals */
6434 bnx2x_free_skbs(bp);
6435 for_each_queue(bp, i)
6436 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6437 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6441 /* TBD we really need to reset the chip
6442 if we want to recover from this */
6446 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6450 /* halt the connection */
6451 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6452 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6454 /* Wait for completion */
6455 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6456 &(bp->fp[index].state), 1);
6457 if (rc) /* timeout */
6460 /* delete cfc entry */
6461 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6463 /* Wait for completion */
6464 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6465 &(bp->fp[index].state), 1);
6469 static int bnx2x_stop_leading(struct bnx2x *bp)
6471 u16 dsb_sp_prod_idx;
6472 /* if the other port is handling traffic,
6473 this can take a lot of time */
6479 /* Send HALT ramrod */
6480 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6481 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6483 /* Wait for completion */
6484 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6485 &(bp->fp[0].state), 1);
6486 if (rc) /* timeout */
6489 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6491 /* Send PORT_DELETE ramrod */
6492 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6494 /* Wait for completion to arrive on the default status block;
6495 we are going to reset the chip anyway,
6496 so there is not much to do if this times out
6498 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6501 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6502 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6503 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6504 #ifdef BNX2X_STOP_ON_ERROR
6514 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6515 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6520 static void bnx2x_reset_func(struct bnx2x *bp)
6522 int port = BP_PORT(bp);
6523 int func = BP_FUNC(bp);
6527 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6528 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6530 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6533 base = FUNC_ILT_BASE(func);
6534 for (i = base; i < base + ILT_PER_FUNC; i++)
6535 bnx2x_ilt_wr(bp, i, 0);
6538 static void bnx2x_reset_port(struct bnx2x *bp)
6540 int port = BP_PORT(bp);
6543 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6545 /* Do not rcv packets to BRB */
6546 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6547 /* Do not direct rcv packets that are not for MCP to the BRB */
6548 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6549 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6552 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6555 /* Check for BRB port occupancy */
6556 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6558 DP(NETIF_MSG_IFDOWN,
6559 "BRB1 is not empty %d blooks are occupied\n", val);
6561 /* TODO: Close Doorbell port? */
6564 static void bnx2x_reset_common(struct bnx2x *bp)
6567 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6569 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6572 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6574 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6575 BP_FUNC(bp), reset_code);
6577 switch (reset_code) {
6578 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6579 bnx2x_reset_port(bp);
6580 bnx2x_reset_func(bp);
6581 bnx2x_reset_common(bp);
6584 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6585 bnx2x_reset_port(bp);
6586 bnx2x_reset_func(bp);
6589 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6590 bnx2x_reset_func(bp);
6594 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6599 /* must be called with rtnl_lock */
6600 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6602 int port = BP_PORT(bp);
6606 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6608 bp->rx_mode = BNX2X_RX_MODE_NONE;
6609 bnx2x_set_storm_rx_mode(bp);
6611 if (netif_running(bp->dev)) {
6612 netif_tx_disable(bp->dev);
6613 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6616 del_timer_sync(&bp->timer);
6617 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6618 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6619 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6621 /* Wait until tx fast path tasks complete */
6622 for_each_queue(bp, i) {
6623 struct bnx2x_fastpath *fp = &bp->fp[i];
6627 while (BNX2X_HAS_TX_WORK(fp)) {
6629 if (!netif_running(bp->dev))
6630 bnx2x_tx_int(fp, 1000);
6633 BNX2X_ERR("timeout waiting for queue[%d]\n",
6635 #ifdef BNX2X_STOP_ON_ERROR
6648 /* Give HW time to discard old tx messages */
6651 for_each_queue(bp, i)
6652 napi_disable(&bnx2x_fp(bp, i, napi));
6653 /* Disable interrupts after Tx and Rx are disabled on stack level */
6654 bnx2x_int_disable_sync(bp);
6659 if (unload_mode == UNLOAD_NORMAL)
6660 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6662 else if (bp->flags & NO_WOL_FLAG) {
6663 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6664 if (CHIP_IS_E1H(bp))
6665 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6667 } else if (bp->wol) {
6668 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6669 u8 *mac_addr = bp->dev->dev_addr;
6671 /* The mac address is written to entries 1-4 to
6672 preserve entry 0 which is used by the PMF */
6673 u8 entry = (BP_E1HVN(bp) + 1)*8;
6675 val = (mac_addr[0] << 8) | mac_addr[1];
6676 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6678 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6679 (mac_addr[4] << 8) | mac_addr[5];
6680 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6682 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6685 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
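/*
 * Standalone sketch of the WoL MAC-match packing above: the six
 * address bytes are split into a 16-bit high word and a 32-bit low
 * word before being written to the two EMAC match registers.
 * pack_mac() is an illustrative helper, not a driver function.
 */
#include <stdio.h>
#include <stdint.h>

static void pack_mac(const uint8_t mac[6], uint32_t *hi, uint32_t *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	      (mac[4] << 8) | mac[5];
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint32_t hi, lo;

	pack_mac(mac, &hi, &lo);
	printf("hi=0x%04x lo=0x%08x\n", hi, lo); /* hi=0x0010 lo=0x18aabbcc */
	return 0;
}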
6687 if (CHIP_IS_E1H(bp))
6688 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6690 /* Close multi and leading connections
6691 Completions for ramrods are collected in a synchronous way */
6692 for_each_nondefault_queue(bp, i)
6693 if (bnx2x_stop_multi(bp, i))
6696 rc = bnx2x_stop_leading(bp);
6698 BNX2X_ERR("Stop leading failed!\n");
6699 #ifdef BNX2X_STOP_ON_ERROR
6708 reset_code = bnx2x_fw_command(bp, reset_code);
6710 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6711 load_count[0], load_count[1], load_count[2]);
6713 load_count[1 + port]--;
6714 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6715 load_count[0], load_count[1], load_count[2]);
6716 if (load_count[0] == 0)
6717 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6718 else if (load_count[1 + port] == 0)
6719 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6721 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6724 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6725 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6726 bnx2x__link_reset(bp);
6728 /* Reset the chip */
6729 bnx2x_reset_chip(bp, reset_code);
6731 /* Report UNLOAD_DONE to MCP */
6733 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6735 /* Free SKBs, SGEs, TPA pool and driver internals */
6736 bnx2x_free_skbs(bp);
6737 for_each_queue(bp, i)
6738 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6739 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6742 bp->state = BNX2X_STATE_CLOSED;
6744 netif_carrier_off(bp->dev);
6749 static void bnx2x_reset_task(struct work_struct *work)
6751 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6753 #ifdef BNX2X_STOP_ON_ERROR
6754 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6755 " so reset not done to allow debug dump,\n"
6756 KERN_ERR " you will need to reboot when done\n");
6762 if (!netif_running(bp->dev))
6763 goto reset_task_exit;
6765 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6766 bnx2x_nic_load(bp, LOAD_NORMAL);
6772 /* end of nic load/unload */
6777 * Init service functions
6780 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6784 /* Check if there is any driver already loaded */
6785 val = REG_RD(bp, MISC_REG_UNPREPARED);
6787 /* Check if it is the UNDI driver
6788 * UNDI driver initializes CID offset for normal bell to 0x7
6790 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6791 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6793 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6795 int func = BP_FUNC(bp);
6799 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6801 /* try unload UNDI on port 0 */
6804 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6805 DRV_MSG_SEQ_NUMBER_MASK);
6806 reset_code = bnx2x_fw_command(bp, reset_code);
6808 /* if UNDI is loaded on the other port */
6809 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6811 /* send "DONE" for previous unload */
6812 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6814 /* unload UNDI on port 1 */
6817 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6818 DRV_MSG_SEQ_NUMBER_MASK);
6819 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6821 bnx2x_fw_command(bp, reset_code);
6824 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6825 HC_REG_CONFIG_0), 0x1000);
6827 /* close input traffic and wait for it */
6828 /* Do not rcv packets to BRB */
6830 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6831 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6832 /* Do not direct rcv packets that are not for MCP to
6835 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6836 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6839 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6840 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6843 /* save NIG port swap info */
6844 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6845 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6848 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6851 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6853 /* take the NIG out of reset and restore swap values */
6855 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6856 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6857 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6858 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6860 /* send unload done to the MCP */
6861 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6863 /* restore our func and fw_seq */
6866 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6867 DRV_MSG_SEQ_NUMBER_MASK);
6869 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6873 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6875 u32 val, val2, val3, val4, id;
6877 /* Get the chip revision id and number. */
6878 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6879 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6880 id = ((val & 0xffff) << 16);
6881 val = REG_RD(bp, MISC_REG_CHIP_REV);
6882 id |= ((val & 0xf) << 12);
6883 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6884 id |= ((val & 0xff) << 4);
6885 val = REG_RD(bp, MISC_REG_BOND_ID);
6886 id |= (val & 0xf);
6887 bp->common.chip_id = id;
6888 bp->link_params.chip_id = bp->common.chip_id;
6889 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
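/*
 * Standalone illustration of the chip-id layout noted above
 * (num: bits 16-31, rev: 12-15, metal: 4-11, bond_id: 0-3); the
 * field values used in main() are made up for the demonstration.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t make_chip_id(uint32_t num, uint32_t rev,
			     uint32_t metal, uint32_t bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}

int main(void)
{
	printf("chip ID is 0x%x\n",
	       make_chip_id(0x164e, 0x1, 0x00, 0x0)); /* 0x164e1000 */
	return 0;
}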
6891 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6892 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6893 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6894 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6895 bp->common.flash_size, bp->common.flash_size);
6897 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6898 bp->link_params.shmem_base = bp->common.shmem_base;
6899 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6901 if (!bp->common.shmem_base ||
6902 (bp->common.shmem_base < 0xA0000) ||
6903 (bp->common.shmem_base >= 0xC0000)) {
6904 BNX2X_DEV_INFO("MCP not active\n");
6905 bp->flags |= NO_MCP_FLAG;
6909 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6910 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6911 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6912 BNX2X_ERR("BAD MCP validity signature\n");
6914 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6915 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6917 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6918 bp->common.hw_config, bp->common.board);
6920 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6921 SHARED_HW_CFG_LED_MODE_MASK) >>
6922 SHARED_HW_CFG_LED_MODE_SHIFT);
6924 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6925 bp->common.bc_ver = val;
6926 BNX2X_DEV_INFO("bc_ver %X\n", val);
6927 if (val < BNX2X_BC_VER) {
6928 /* for now only warn
6929 * later we might need to enforce this */
6930 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6931 " please upgrade BC\n", BNX2X_BC_VER, val);
6933 BNX2X_DEV_INFO("%sWoL Capable\n",
6934 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6936 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6937 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6938 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6939 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6941 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6942 val, val2, val3, val4);
6945 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6948 int port = BP_PORT(bp);
6951 switch (switch_cfg) {
6953 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6956 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6957 switch (ext_phy_type) {
6958 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6959 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6962 bp->port.supported |= (SUPPORTED_10baseT_Half |
6963 SUPPORTED_10baseT_Full |
6964 SUPPORTED_100baseT_Half |
6965 SUPPORTED_100baseT_Full |
6966 SUPPORTED_1000baseT_Full |
6967 SUPPORTED_2500baseX_Full |
6972 SUPPORTED_Asym_Pause);
6975 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6976 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6979 bp->port.supported |= (SUPPORTED_10baseT_Half |
6980 SUPPORTED_10baseT_Full |
6981 SUPPORTED_100baseT_Half |
6982 SUPPORTED_100baseT_Full |
6983 SUPPORTED_1000baseT_Full |
6988 SUPPORTED_Asym_Pause);
6992 BNX2X_ERR("NVRAM config error. "
6993 "BAD SerDes ext_phy_config 0x%x\n",
6994 bp->link_params.ext_phy_config);
6998 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7000 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7003 case SWITCH_CFG_10G:
7004 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7007 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7008 switch (ext_phy_type) {
7009 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7010 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7013 bp->port.supported |= (SUPPORTED_10baseT_Half |
7014 SUPPORTED_10baseT_Full |
7015 SUPPORTED_100baseT_Half |
7016 SUPPORTED_100baseT_Full |
7017 SUPPORTED_1000baseT_Full |
7018 SUPPORTED_2500baseX_Full |
7019 SUPPORTED_10000baseT_Full |
7024 SUPPORTED_Asym_Pause);
7027 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7028 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7031 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7034 SUPPORTED_Asym_Pause);
7037 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7038 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7041 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7042 SUPPORTED_1000baseT_Full |
7045 SUPPORTED_Asym_Pause);
7048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7049 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7052 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7053 SUPPORTED_1000baseT_Full |
7057 SUPPORTED_Asym_Pause);
7060 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7061 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7064 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7065 SUPPORTED_2500baseX_Full |
7066 SUPPORTED_1000baseT_Full |
7070 SUPPORTED_Asym_Pause);
7073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7074 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7077 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7081 SUPPORTED_Asym_Pause);
7084 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7085 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7086 bp->link_params.ext_phy_config);
7090 BNX2X_ERR("NVRAM config error. "
7091 "BAD XGXS ext_phy_config 0x%x\n",
7092 bp->link_params.ext_phy_config);
7096 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7098 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7103 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7104 bp->port.link_config);
7107 bp->link_params.phy_addr = bp->port.phy_addr;
7109 /* mask what we support according to speed_cap_mask */
7110 if (!(bp->link_params.speed_cap_mask &
7111 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7112 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7114 if (!(bp->link_params.speed_cap_mask &
7115 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7116 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7118 if (!(bp->link_params.speed_cap_mask &
7119 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7120 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7122 if (!(bp->link_params.speed_cap_mask &
7123 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7124 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7126 if (!(bp->link_params.speed_cap_mask &
7127 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7128 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7129 SUPPORTED_1000baseT_Full);
7131 if (!(bp->link_params.speed_cap_mask &
7132 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7133 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7135 if (!(bp->link_params.speed_cap_mask &
7136 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7137 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7139 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7142 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7144 bp->link_params.req_duplex = DUPLEX_FULL;
7146 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7147 case PORT_FEATURE_LINK_SPEED_AUTO:
7148 if (bp->port.supported & SUPPORTED_Autoneg) {
7149 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7150 bp->port.advertising = bp->port.supported;
7153 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7155 if ((ext_phy_type ==
7156 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7158 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7159 /* force 10G, no AN */
7160 bp->link_params.req_line_speed = SPEED_10000;
7161 bp->port.advertising =
7162 (ADVERTISED_10000baseT_Full |
7166 BNX2X_ERR("NVRAM config error. "
7167 "Invalid link_config 0x%x"
7168 " Autoneg not supported\n",
7169 bp->port.link_config);
7174 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7175 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7176 bp->link_params.req_line_speed = SPEED_10;
7177 bp->port.advertising = (ADVERTISED_10baseT_Full |
7180 BNX2X_ERR("NVRAM config error. "
7181 "Invalid link_config 0x%x"
7182 " speed_cap_mask 0x%x\n",
7183 bp->port.link_config,
7184 bp->link_params.speed_cap_mask);
7189 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7190 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7191 bp->link_params.req_line_speed = SPEED_10;
7192 bp->link_params.req_duplex = DUPLEX_HALF;
7193 bp->port.advertising = (ADVERTISED_10baseT_Half |
7196 BNX2X_ERR("NVRAM config error. "
7197 "Invalid link_config 0x%x"
7198 " speed_cap_mask 0x%x\n",
7199 bp->port.link_config,
7200 bp->link_params.speed_cap_mask);
7205 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7206 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7207 bp->link_params.req_line_speed = SPEED_100;
7208 bp->port.advertising = (ADVERTISED_100baseT_Full |
7211 BNX2X_ERR("NVRAM config error. "
7212 "Invalid link_config 0x%x"
7213 " speed_cap_mask 0x%x\n",
7214 bp->port.link_config,
7215 bp->link_params.speed_cap_mask);
7220 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7221 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7222 bp->link_params.req_line_speed = SPEED_100;
7223 bp->link_params.req_duplex = DUPLEX_HALF;
7224 bp->port.advertising = (ADVERTISED_100baseT_Half |
7227 BNX2X_ERR("NVRAM config error. "
7228 "Invalid link_config 0x%x"
7229 " speed_cap_mask 0x%x\n",
7230 bp->port.link_config,
7231 bp->link_params.speed_cap_mask);
7236 case PORT_FEATURE_LINK_SPEED_1G:
7237 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7238 bp->link_params.req_line_speed = SPEED_1000;
7239 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7242 BNX2X_ERR("NVRAM config error. "
7243 "Invalid link_config 0x%x"
7244 " speed_cap_mask 0x%x\n",
7245 bp->port.link_config,
7246 bp->link_params.speed_cap_mask);
7251 case PORT_FEATURE_LINK_SPEED_2_5G:
7252 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7253 bp->link_params.req_line_speed = SPEED_2500;
7254 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7257 BNX2X_ERR("NVRAM config error. "
7258 "Invalid link_config 0x%x"
7259 " speed_cap_mask 0x%x\n",
7260 bp->port.link_config,
7261 bp->link_params.speed_cap_mask);
7266 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7267 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7268 case PORT_FEATURE_LINK_SPEED_10G_KR:
7269 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7270 bp->link_params.req_line_speed = SPEED_10000;
7271 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7274 BNX2X_ERR("NVRAM config error. "
7275 "Invalid link_config 0x%x"
7276 " speed_cap_mask 0x%x\n",
7277 bp->port.link_config,
7278 bp->link_params.speed_cap_mask);
7284 BNX2X_ERR("NVRAM config error. "
7285 "BAD link speed link_config 0x%x\n",
7286 bp->port.link_config);
7287 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7288 bp->port.advertising = bp->port.supported;
7292 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7293 PORT_FEATURE_FLOW_CONTROL_MASK);
7294 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7295 !(bp->port.supported & SUPPORTED_Autoneg))
7296 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7298 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7299 " advertising 0x%x\n",
7300 bp->link_params.req_line_speed,
7301 bp->link_params.req_duplex,
7302 bp->link_params.req_flow_ctrl, bp->port.advertising);
7305 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7307 int port = BP_PORT(bp);
7310 bp->link_params.bp = bp;
7311 bp->link_params.port = port;
7313 bp->link_params.serdes_config =
7314 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7315 bp->link_params.lane_config =
7316 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7317 bp->link_params.ext_phy_config =
7319 dev_info.port_hw_config[port].external_phy_config);
7320 bp->link_params.speed_cap_mask =
7322 dev_info.port_hw_config[port].speed_capability_mask);
7324 bp->port.link_config =
7325 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7327 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7328 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7329 " link_config 0x%08x\n",
7330 bp->link_params.serdes_config,
7331 bp->link_params.lane_config,
7332 bp->link_params.ext_phy_config,
7333 bp->link_params.speed_cap_mask, bp->port.link_config);
7335 bp->link_params.switch_cfg = (bp->port.link_config &
7336 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7337 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7339 bnx2x_link_settings_requested(bp);
7341 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7342 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7343 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7344 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7345 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7346 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7347 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7348 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7349 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7350 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7353 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7355 int func = BP_FUNC(bp);
7359 bnx2x_get_common_hwinfo(bp);
7363 if (CHIP_IS_E1H(bp)) {
7365 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7368 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7369 FUNC_MF_CFG_E1HOV_TAG_MASK);
7370 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7374 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7376 func, bp->e1hov, bp->e1hov);
7378 BNX2X_DEV_INFO("Single function mode\n");
7380 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7381 " aborting\n", func);
7387 if (!BP_NOMCP(bp)) {
7388 bnx2x_get_port_hwinfo(bp);
7390 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7391 DRV_MSG_SEQ_NUMBER_MASK);
7392 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7396 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7397 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7398 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7399 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7400 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7401 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7402 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7403 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7404 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7405 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7406 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7408 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7416 /* only supposed to happen on emulation/FPGA */
7417 BNX2X_ERR("warning rendom MAC workaround active\n");
7418 random_ether_addr(bp->dev->dev_addr);
7419 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7425 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7427 int func = BP_FUNC(bp);
7430 /* Disable interrupt handling until HW is initialized */
7431 atomic_set(&bp->intr_sem, 1);
7433 mutex_init(&bp->port.phy_mutex);
7435 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7436 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7438 rc = bnx2x_get_hwinfo(bp);
7440 /* need to reset chip if undi was active */
7442 bnx2x_undi_unload(bp);
7444 if (CHIP_REV_IS_FPGA(bp))
7445 printk(KERN_ERR PFX "FPGA detected\n");
7447 if (BP_NOMCP(bp) && (func == 0))
7449 "MCP disabled, must load devices in order!\n");
7453 bp->flags &= ~TPA_ENABLE_FLAG;
7454 bp->dev->features &= ~NETIF_F_LRO;
7456 bp->flags |= TPA_ENABLE_FLAG;
7457 bp->dev->features |= NETIF_F_LRO;
7461 bp->tx_ring_size = MAX_TX_AVAIL;
7462 bp->rx_ring_size = MAX_RX_AVAIL;
7470 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7471 bp->current_interval = (poll ? poll : bp->timer_interval);
7473 init_timer(&bp->timer);
7474 bp->timer.expires = jiffies + bp->current_interval;
7475 bp->timer.data = (unsigned long) bp;
7476 bp->timer.function = bnx2x_timer;
7482 * ethtool service functions
7485 /* All ethtool functions called with rtnl_lock */
7487 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7489 struct bnx2x *bp = netdev_priv(dev);
7491 cmd->supported = bp->port.supported;
7492 cmd->advertising = bp->port.advertising;
7494 if (netif_carrier_ok(dev)) {
7495 cmd->speed = bp->link_vars.line_speed;
7496 cmd->duplex = bp->link_vars.duplex;
7498 cmd->speed = bp->link_params.req_line_speed;
7499 cmd->duplex = bp->link_params.req_duplex;
7504 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7505 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7506 if (vn_max_rate < cmd->speed)
7507 cmd->speed = vn_max_rate;
7510 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7512 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7514 switch (ext_phy_type) {
7515 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7516 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7517 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7518 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7519 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7520 cmd->port = PORT_FIBRE;
7523 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7524 cmd->port = PORT_TP;
7527 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7528 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7529 bp->link_params.ext_phy_config);
7533 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7534 bp->link_params.ext_phy_config);
7538 cmd->port = PORT_TP;
7540 cmd->phy_address = bp->port.phy_addr;
7541 cmd->transceiver = XCVR_INTERNAL;
7543 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7544 cmd->autoneg = AUTONEG_ENABLE;
7546 cmd->autoneg = AUTONEG_DISABLE;
7551 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7552 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7553 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7554 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7555 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7556 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7557 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7562 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7564 struct bnx2x *bp = netdev_priv(dev);
7570 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7571 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7572 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7573 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7574 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7575 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7576 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7578 if (cmd->autoneg == AUTONEG_ENABLE) {
7579 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7580 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7584 /* advertise the requested speed and duplex if supported */
7585 cmd->advertising &= bp->port.supported;
7587 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7588 bp->link_params.req_duplex = DUPLEX_FULL;
7589 bp->port.advertising |= (ADVERTISED_Autoneg |
7592 } else { /* forced speed */
7593 /* advertise the requested speed and duplex if supported */
7594 switch (cmd->speed) {
7596 if (cmd->duplex == DUPLEX_FULL) {
7597 if (!(bp->port.supported &
7598 SUPPORTED_10baseT_Full)) {
7600 "10M full not supported\n");
7604 advertising = (ADVERTISED_10baseT_Full |
7607 if (!(bp->port.supported &
7608 SUPPORTED_10baseT_Half)) {
7610 "10M half not supported\n");
7614 advertising = (ADVERTISED_10baseT_Half |
7620 if (cmd->duplex == DUPLEX_FULL) {
7621 if (!(bp->port.supported &
7622 SUPPORTED_100baseT_Full)) {
7624 "100M full not supported\n");
7628 advertising = (ADVERTISED_100baseT_Full |
7631 if (!(bp->port.supported &
7632 SUPPORTED_100baseT_Half)) {
7634 "100M half not supported\n");
7638 advertising = (ADVERTISED_100baseT_Half |
7644 if (cmd->duplex != DUPLEX_FULL) {
7645 DP(NETIF_MSG_LINK, "1G half not supported\n");
7649 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7650 DP(NETIF_MSG_LINK, "1G full not supported\n");
7654 advertising = (ADVERTISED_1000baseT_Full |
7659 if (cmd->duplex != DUPLEX_FULL) {
7661 "2.5G half not supported\n");
7665 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7667 "2.5G full not supported\n");
7671 advertising = (ADVERTISED_2500baseX_Full |
7676 if (cmd->duplex != DUPLEX_FULL) {
7677 DP(NETIF_MSG_LINK, "10G half not supported\n");
7681 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7682 DP(NETIF_MSG_LINK, "10G full not supported\n");
7686 advertising = (ADVERTISED_10000baseT_Full |
7691 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7695 bp->link_params.req_line_speed = cmd->speed;
7696 bp->link_params.req_duplex = cmd->duplex;
7697 bp->port.advertising = advertising;
7700 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7701 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7702 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7703 bp->port.advertising);
7705 if (netif_running(dev)) {
7706 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7713 #define PHY_FW_VER_LEN 10
7715 static void bnx2x_get_drvinfo(struct net_device *dev,
7716 struct ethtool_drvinfo *info)
7718 struct bnx2x *bp = netdev_priv(dev);
7719 char phy_fw_ver[PHY_FW_VER_LEN];
7721 strcpy(info->driver, DRV_MODULE_NAME);
7722 strcpy(info->version, DRV_MODULE_VERSION);
7724 phy_fw_ver[0] = '\0';
7726 bnx2x_acquire_phy_lock(bp);
7727 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7728 (bp->state != BNX2X_STATE_CLOSED),
7729 phy_fw_ver, PHY_FW_VER_LEN);
7730 bnx2x_release_phy_lock(bp);
7733 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7734 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7735 BCM_5710_FW_REVISION_VERSION,
7736 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7737 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7738 strcpy(info->bus_info, pci_name(bp->pdev));
7739 info->n_stats = BNX2X_NUM_STATS;
7740 info->testinfo_len = BNX2X_NUM_TESTS;
7741 info->eedump_len = bp->common.flash_size;
7742 info->regdump_len = 0;
7745 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7747 struct bnx2x *bp = netdev_priv(dev);
7749 if (bp->flags & NO_WOL_FLAG) {
7753 wol->supported = WAKE_MAGIC;
7755 wol->wolopts = WAKE_MAGIC;
7759 memset(&wol->sopass, 0, sizeof(wol->sopass));
7762 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7764 struct bnx2x *bp = netdev_priv(dev);
7766 if (wol->wolopts & ~WAKE_MAGIC)
7769 if (wol->wolopts & WAKE_MAGIC) {
7770 if (bp->flags & NO_WOL_FLAG)
7780 static u32 bnx2x_get_msglevel(struct net_device *dev)
7782 struct bnx2x *bp = netdev_priv(dev);
7784 return bp->msglevel;
7787 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7789 struct bnx2x *bp = netdev_priv(dev);
7791 if (capable(CAP_NET_ADMIN))
7792 bp->msglevel = level;
7795 static int bnx2x_nway_reset(struct net_device *dev)
7797 struct bnx2x *bp = netdev_priv(dev);
7802 if (netif_running(dev)) {
7803 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7810 static int bnx2x_get_eeprom_len(struct net_device *dev)
7812 struct bnx2x *bp = netdev_priv(dev);
7814 return bp->common.flash_size;
7817 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7819 int port = BP_PORT(bp);
7823 /* adjust timeout for emulation/FPGA */
7824 count = NVRAM_TIMEOUT_COUNT;
7825 if (CHIP_REV_IS_SLOW(bp))
7828 /* request access to nvram interface */
7829 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7830 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7832 for (i = 0; i < count*10; i++) {
7833 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7834 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7840 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7841 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7848 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7850 int port = BP_PORT(bp);
7854 /* adjust timeout for emulation/FPGA */
7855 count = NVRAM_TIMEOUT_COUNT;
7856 if (CHIP_REV_IS_SLOW(bp))
7859 /* relinquish nvram interface */
7860 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7861 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7863 for (i = 0; i < count*10; i++) {
7864 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7865 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7871 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7872 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7879 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7883 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7885 /* enable both bits, even on read */
7886 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7887 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7888 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7891 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7895 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7897 /* disable both bits, even after read */
7898 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7899 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7900 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7903 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7909 /* build the command word */
7910 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7912 /* need to clear DONE bit separately */
7913 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7915 /* address of the NVRAM to read from */
7916 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7917 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7919 /* issue a read command */
7920 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7922 /* adjust timeout for emulation/FPGA */
7923 count = NVRAM_TIMEOUT_COUNT;
7924 if (CHIP_REV_IS_SLOW(bp))
7927 /* wait for completion */
7930 for (i = 0; i < count; i++) {
7932 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7934 if (val & MCPR_NVM_COMMAND_DONE) {
7935 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7936 /* we read nvram data in cpu order
7937 * but ethtool sees it as an array of bytes
7938 * converting to big-endian will do the work */
7939 val = cpu_to_be32(val);
7949 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7956 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7958 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7963 if (offset + buf_size > bp->common.flash_size) {
7964 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7965 " buf_size (0x%x) > flash_size (0x%x)\n",
7966 offset, buf_size, bp->common.flash_size);
7970 /* request access to nvram interface */
7971 rc = bnx2x_acquire_nvram_lock(bp);
7975 /* enable access to nvram interface */
7976 bnx2x_enable_nvram_access(bp);
7978 /* read the first word(s) */
7979 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7980 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7981 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7982 memcpy(ret_buf, &val, 4);
7984 /* advance to the next dword */
7985 offset += sizeof(u32);
7986 ret_buf += sizeof(u32);
7987 buf_size -= sizeof(u32);
7992 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7993 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7994 memcpy(ret_buf, &val, 4);
7997 /* disable access to nvram interface */
7998 bnx2x_disable_nvram_access(bp);
7999 bnx2x_release_nvram_lock(bp);
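/*
 * Userspace sketch of the byte-order note in the read path above: the
 * NVRAM word arrives in CPU order, but ethtool hands the buffer to
 * userspace as a byte array, so the driver stores it big-endian.
 * htonl() plays the role of cpu_to_be32() in this sketch.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t word = 0x669955aa;	/* NVRAM magic, CPU order */
	uint8_t buf[4];

	word = htonl(word);		/* force big-endian byte layout */
	memcpy(buf, &word, 4);
	/* bytes now appear in NVRAM order regardless of host endianness */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}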
8004 static int bnx2x_get_eeprom(struct net_device *dev,
8005 struct ethtool_eeprom *eeprom, u8 *eebuf)
8007 struct bnx2x *bp = netdev_priv(dev);
8010 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8011 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8012 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8013 eeprom->len, eeprom->len);
8015 /* parameters already validated in ethtool_get_eeprom */
8017 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8022 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8027 /* build the command word */
8028 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8030 /* need to clear DONE bit separately */
8031 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8033 /* write the data */
8034 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8036 /* address of the NVRAM to write to */
8037 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8038 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8040 /* issue the write command */
8041 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8043 /* adjust timeout for emulation/FPGA */
8044 count = NVRAM_TIMEOUT_COUNT;
8045 if (CHIP_REV_IS_SLOW(bp))
8048 /* wait for completion */
8050 for (i = 0; i < count; i++) {
8052 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8053 if (val & MCPR_NVM_COMMAND_DONE) {
8062 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8064 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8072 if (offset + buf_size > bp->common.flash_size) {
8073 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8074 " buf_size (0x%x) > flash_size (0x%x)\n",
8075 offset, buf_size, bp->common.flash_size);
8079 /* request access to nvram interface */
8080 rc = bnx2x_acquire_nvram_lock(bp);
8084 /* enable access to nvram interface */
8085 bnx2x_enable_nvram_access(bp);
8087 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8088 align_offset = (offset & ~0x03);
8089 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8092 val &= ~(0xff << BYTE_OFFSET(offset));
8093 val |= (*data_buf << BYTE_OFFSET(offset));
8095 /* nvram data is returned as an array of bytes
8096 * convert it back to cpu order */
8097 val = be32_to_cpu(val);
8099 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8103 /* disable access to nvram interface */
8104 bnx2x_disable_nvram_access(bp);
8105 bnx2x_release_nvram_lock(bp);
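/*
 * Standalone sketch of the read-modify-write done by
 * bnx2x_nvram_write1() above: the aligned 32-bit word is read, one
 * byte is spliced in at the byte offset within the dword, and the
 * word is written back (the driver additionally converts between
 * byte-array and CPU order, which this sketch leaves out).
 */
#include <stdio.h>
#include <stdint.h>

#define BYTE_OFF(offset) (8 * ((offset) & 0x03))

static uint32_t splice_byte(uint32_t word, uint32_t offset, uint8_t b)
{
	word &= ~(0xffu << BYTE_OFF(offset));	/* clear the target byte */
	word |= (uint32_t)b << BYTE_OFF(offset);
	return word;
}

int main(void)
{
	/* replace the byte at offset 2 within an aligned dword */
	printf("0x%08x\n", splice_byte(0x11223344, 2, 0xab)); /* 0x11ab3344 */
	return 0;
}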
8110 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8118 if (buf_size == 1) /* ethtool */
8119 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8121 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8123 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8128 if (offset + buf_size > bp->common.flash_size) {
8129 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8130 " buf_size (0x%x) > flash_size (0x%x)\n",
8131 offset, buf_size, bp->common.flash_size);
8135 /* request access to nvram interface */
8136 rc = bnx2x_acquire_nvram_lock(bp);
8140 /* enable access to nvram interface */
8141 bnx2x_enable_nvram_access(bp);
8144 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8145 while ((written_so_far < buf_size) && (rc == 0)) {
8146 if (written_so_far == (buf_size - sizeof(u32)))
8147 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8148 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8149 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8150 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8151 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8153 memcpy(&val, data_buf, 4);
8155 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8157 /* advance to the next dword */
8158 offset += sizeof(u32);
8159 data_buf += sizeof(u32);
8160 written_so_far += sizeof(u32);
8164 /* disable access to nvram interface */
8165 bnx2x_disable_nvram_access(bp);
8166 bnx2x_release_nvram_lock(bp);
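/*
 * Sketch (not driver code) of the FIRST/LAST flagging in the write
 * loop above: the buffer is streamed dword by dword, each page-sized
 * burst is opened with a FIRST command and closed with a LAST
 * command, and the final dword of the buffer is always LAST.  PAGE
 * stands in for NVRAM_PAGE_SIZE and the flag values are illustrative.
 */
#include <stdio.h>

#define FLAG_FIRST	1
#define FLAG_LAST	2
#define PAGE		256	/* assumed page size in bytes */

static void show_cmd_flags(unsigned int offset, unsigned int buf_size)
{
	unsigned int written = 0;
	int flags = FLAG_FIRST;

	while (written < buf_size) {
		if (written == buf_size - 4)
			flags |= FLAG_LAST;	/* end of the buffer */
		else if (((offset + 4) % PAGE) == 0)
			flags |= FLAG_LAST;	/* last dword of a page */
		else if ((offset % PAGE) == 0)
			flags |= FLAG_FIRST;	/* first dword of a new page */
		printf("offset 0x%03x flags %d\n", offset, flags);
		flags = 0;
		offset += 4;
		written += 4;
	}
}

int main(void)
{
	show_cmd_flags(0xf8, 16);	/* crosses a page boundary at 0x100 */
	return 0;
}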
8171 static int bnx2x_set_eeprom(struct net_device *dev,
8172 struct ethtool_eeprom *eeprom, u8 *eebuf)
8174 struct bnx2x *bp = netdev_priv(dev);
8177 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8178 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8179 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8180 eeprom->len, eeprom->len);
8182 /* parameters already validated in ethtool_set_eeprom */
8184 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8185 if (eeprom->magic == 0x00504859)
8188 bnx2x_acquire_phy_lock(bp);
8189 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8190 bp->link_params.ext_phy_config,
8191 (bp->state != BNX2X_STATE_CLOSED),
8192 eebuf, eeprom->len);
8193 if ((bp->state == BNX2X_STATE_OPEN) ||
8194 (bp->state == BNX2X_STATE_DISABLED)) {
8195 rc |= bnx2x_link_reset(&bp->link_params,
8197 rc |= bnx2x_phy_init(&bp->link_params,
8200 bnx2x_release_phy_lock(bp);
8202 } else /* Only the PMF can access the PHY */
8205 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8210 static int bnx2x_get_coalesce(struct net_device *dev,
8211 struct ethtool_coalesce *coal)
8213 struct bnx2x *bp = netdev_priv(dev);
8215 memset(coal, 0, sizeof(struct ethtool_coalesce));
8217 coal->rx_coalesce_usecs = bp->rx_ticks;
8218 coal->tx_coalesce_usecs = bp->tx_ticks;
8223 static int bnx2x_set_coalesce(struct net_device *dev,
8224 struct ethtool_coalesce *coal)
8226 struct bnx2x *bp = netdev_priv(dev);
8228 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8229 if (bp->rx_ticks > 3000)
8230 bp->rx_ticks = 3000;
8232 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8233 if (bp->tx_ticks > 0x3000)
8234 bp->tx_ticks = 0x3000;
8236 if (netif_running(dev))
8237 bnx2x_update_coalesce(bp);
8242 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8244 struct bnx2x *bp = netdev_priv(dev);
8248 if (data & ETH_FLAG_LRO) {
8249 if (!(dev->features & NETIF_F_LRO)) {
8250 dev->features |= NETIF_F_LRO;
8251 bp->flags |= TPA_ENABLE_FLAG;
8255 } else if (dev->features & NETIF_F_LRO) {
8256 dev->features &= ~NETIF_F_LRO;
8257 bp->flags &= ~TPA_ENABLE_FLAG;
8261 if (changed && netif_running(dev)) {
8262 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8263 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8269 static void bnx2x_get_ringparam(struct net_device *dev,
8270 struct ethtool_ringparam *ering)
8272 struct bnx2x *bp = netdev_priv(dev);
8274 ering->rx_max_pending = MAX_RX_AVAIL;
8275 ering->rx_mini_max_pending = 0;
8276 ering->rx_jumbo_max_pending = 0;
8278 ering->rx_pending = bp->rx_ring_size;
8279 ering->rx_mini_pending = 0;
8280 ering->rx_jumbo_pending = 0;
8282 ering->tx_max_pending = MAX_TX_AVAIL;
8283 ering->tx_pending = bp->tx_ring_size;
8286 static int bnx2x_set_ringparam(struct net_device *dev,
8287 struct ethtool_ringparam *ering)
8289 struct bnx2x *bp = netdev_priv(dev);
8292 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8293 (ering->tx_pending > MAX_TX_AVAIL) ||
8294 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8297 bp->rx_ring_size = ering->rx_pending;
8298 bp->tx_ring_size = ering->tx_pending;
8300 if (netif_running(dev)) {
8301 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8302 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8308 static void bnx2x_get_pauseparam(struct net_device *dev,
8309 struct ethtool_pauseparam *epause)
8311 struct bnx2x *bp = netdev_priv(dev);
8313 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8314 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8316 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8318 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8321 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8322 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8323 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8326 static int bnx2x_set_pauseparam(struct net_device *dev,
8327 struct ethtool_pauseparam *epause)
8329 struct bnx2x *bp = netdev_priv(dev);
8334 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8335 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8336 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8338 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8340 if (epause->rx_pause)
8341 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8343 if (epause->tx_pause)
8344 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8346 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8347 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8349 if (epause->autoneg) {
8350 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8351 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8355 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8356 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8360 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8362 if (netif_running(dev)) {
8363 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8370 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8372 struct bnx2x *bp = netdev_priv(dev);
8377 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8379 struct bnx2x *bp = netdev_priv(dev);
8385 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8388 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8389 dev->features |= NETIF_F_TSO6;
8391 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8392 dev->features &= ~NETIF_F_TSO6;
8398 static const struct {
8399 char string[ETH_GSTRING_LEN];
8400 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8401 { "register_test (offline)" },
8402 { "memory_test (offline)" },
8403 { "loopback_test (offline)" },
8404 { "nvram_test (online)" },
8405 { "interrupt_test (online)" },
8406 { "link_test (online)" },
8407 { "idle check (online)" },
8408 { "MC errors (online)" }
8411 static int bnx2x_self_test_count(struct net_device *dev)
8413 return BNX2X_NUM_TESTS;
8416 static int bnx2x_test_registers(struct bnx2x *bp)
8418 int idx, i, rc = -ENODEV;
8420 static const struct {
8425 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8426 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8427 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8428 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8429 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8430 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8431 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8432 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8433 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8434 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8435 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8436 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8437 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8438 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8439 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8440 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8441 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8442 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8443 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8444 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8445 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8446 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8447 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8448 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8449 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8450 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8451 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8452 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8453 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8454 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8455 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8456 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8457 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8458 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8459 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8460 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8461 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8462 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8464 { 0xffffffff, 0, 0x00000000 }
8467 if (!netif_running(bp->dev))
8470 /* Repeat the test twice:
8471 First by writing 0x00000000, second by writing 0xffffffff */
8472 for (idx = 0; idx < 2; idx++) {
8479 wr_val = 0xffffffff;
8483 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8484 u32 offset, mask, save_val, val;
8485 int port = BP_PORT(bp);
8487 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8488 mask = reg_tbl[i].mask;
8490 save_val = REG_RD(bp, offset);
8492 REG_WR(bp, offset, wr_val);
8493 val = REG_RD(bp, offset);
8495 /* Restore the original register's value */
8496 REG_WR(bp, offset, save_val);
8498 /* verify that the value read is as expected */
8499 if ((val & mask) != (wr_val & mask))
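/*
 * Minimal userspace sketch of the register self-test pattern above:
 * save the register, write a test pattern, read it back, restore the
 * original value, and compare through the read/write mask.  regs[]
 * stands in for the chip's register file and, for simplicity, keeps
 * only the writable bits of each store.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t regs[4];			/* fake register file */

static int test_reg(int idx, uint32_t mask, uint32_t wr_val)
{
	uint32_t save_val = regs[idx];
	uint32_t val;

	regs[idx] = wr_val & mask;		/* keep only RW bits */
	val = regs[idx];
	regs[idx] = save_val;			/* restore original value */

	return (val & mask) == (wr_val & mask) ? 0 : -1;
}

int main(void)
{
	printf("pattern 0x00000000: %s\n",
	       test_reg(0, 0x000003ff, 0x00000000) ? "FAIL" : "ok");
	printf("pattern 0xffffffff: %s\n",
	       test_reg(0, 0x000003ff, 0xffffffff) ? "FAIL" : "ok");
	return 0;
}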
8510 static int bnx2x_test_memory(struct bnx2x *bp)
8512 int i, j, rc = -ENODEV;
8514 static const struct {
8518 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8519 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8520 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8521 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8522 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8523 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8524 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8528 static const struct {
8533 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8534 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8535 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8536 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8537 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8538 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8540 { NULL, 0xffffffff, 0 }
8543 if (!netif_running(bp->dev))
8546 /* Go through all the memories */
8547 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8548 for (j = 0; j < mem_tbl[i].size; j++)
8549 REG_RD(bp, mem_tbl[i].offset + j*4);
8551 /* Check the parity status */
8552 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8553 val = REG_RD(bp, prty_tbl[i].offset);
8554 if (val & ~(prty_tbl[i].mask)) {
8556 "%s is 0x%x\n", prty_tbl[i].name, val);
8567 static void bnx2x_netif_start(struct bnx2x *bp)
8571 if (atomic_dec_and_test(&bp->intr_sem)) {
8572 if (netif_running(bp->dev)) {
8573 bnx2x_int_enable(bp);
8574 for_each_queue(bp, i)
8575 napi_enable(&bnx2x_fp(bp, i, napi));
8576 if (bp->state == BNX2X_STATE_OPEN)
8577 netif_wake_queue(bp->dev);
8582 static void bnx2x_netif_stop(struct bnx2x *bp)
8586 if (netif_running(bp->dev)) {
8587 netif_tx_disable(bp->dev);
8588 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8589 for_each_queue(bp, i)
8590 napi_disable(&bnx2x_fp(bp, i, napi));
8592 bnx2x_int_disable_sync(bp);
8595 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8600 while (bnx2x_link_test(bp) && cnt--)
8604 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8606 unsigned int pkt_size, num_pkts, i;
8607 struct sk_buff *skb;
8608 unsigned char *packet;
8609 struct bnx2x_fastpath *fp = &bp->fp[0];
8610 u16 tx_start_idx, tx_idx;
8611 u16 rx_start_idx, rx_idx;
8613 struct sw_tx_bd *tx_buf;
8614 struct eth_tx_bd *tx_bd;
8616 union eth_rx_cqe *cqe;
8618 struct sw_rx_bd *rx_buf;
8622 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8623 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8624 bnx2x_acquire_phy_lock(bp);
8625 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8626 bnx2x_release_phy_lock(bp);
8628 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8629 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8630 bnx2x_acquire_phy_lock(bp);
8631 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8632 bnx2x_release_phy_lock(bp);
8633 /* wait until link state is restored */
8634 bnx2x_wait_for_link(bp, link_up);
8640 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8643 goto test_loopback_exit;
8645 packet = skb_put(skb, pkt_size);
8646 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8647 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8648 for (i = ETH_HLEN; i < pkt_size; i++)
8649 packet[i] = (unsigned char) (i & 0xff);
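/*
 * Standalone sketch of the loopback frame built above: the
 * destination MAC is set to the device's own address, the rest of the
 * Ethernet header is zeroed, and every payload byte is set to its own
 * offset so the receive path can verify the data positionally (as the
 * rx check further below does).
 */
#include <string.h>

#define ETH_ALEN 6
#define ETH_HLEN 14

static void fill_loopback(unsigned char *pkt, int pkt_size,
			  const unsigned char *own_mac)
{
	int i;

	memcpy(pkt, own_mac, ETH_ALEN);		/* dest MAC = our MAC */
	memset(pkt + ETH_ALEN, 0, ETH_HLEN - ETH_ALEN);
	for (i = ETH_HLEN; i < pkt_size; i++)
		pkt[i] = (unsigned char)(i & 0xff);
}

static int verify_loopback(const unsigned char *pkt, int pkt_size)
{
	int i;

	for (i = ETH_HLEN; i < pkt_size; i++)
		if (pkt[i] != (unsigned char)(i & 0xff))
			return -1;		/* payload corrupted */
	return 0;
}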
8652 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8653 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8655 pkt_prod = fp->tx_pkt_prod++;
8656 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8657 tx_buf->first_bd = fp->tx_bd_prod;
8660 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8661 mapping = pci_map_single(bp->pdev, skb->data,
8662 skb_headlen(skb), PCI_DMA_TODEVICE);
8663 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8664 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8665 tx_bd->nbd = cpu_to_le16(1);
8666 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8667 tx_bd->vlan = cpu_to_le16(pkt_prod);
8668 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8669 ETH_TX_BD_FLAGS_END_BD);
8670 tx_bd->general_data = ((UNICAST_ADDRESS <<
8671 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8673 fp->hw_tx_prods->bds_prod =
8674 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8675 mb(); /* FW restriction: must not reorder writing nbd and packets */
8676 fp->hw_tx_prods->packets_prod =
8677 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8678 DOORBELL(bp, FP_IDX(fp), 0);
8684 bp->dev->trans_start = jiffies;
8688 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8689 if (tx_idx != tx_start_idx + num_pkts)
8690 goto test_loopback_exit;
8692 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8693 if (rx_idx != rx_start_idx + num_pkts)
8694 goto test_loopback_exit;
8696 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8697 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8698 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8699 goto test_loopback_rx_exit;
8701 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8702 if (len != pkt_size)
8703 goto test_loopback_rx_exit;
8705 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8707 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8708 for (i = ETH_HLEN; i < pkt_size; i++)
8709 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8710 goto test_loopback_rx_exit;
8714 test_loopback_rx_exit:
8715 bp->dev->last_rx = jiffies;
8717 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8718 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8719 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8720 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8722 /* Update producers */
8723 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8725 mmiowb(); /* keep prod updates ordered */
8728 bp->link_params.loopback_mode = LOOPBACK_NONE;
8733 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8737 if (!netif_running(bp->dev))
8738 return BNX2X_LOOPBACK_FAILED;
8740 bnx2x_netif_stop(bp);
8742 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8743 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8744 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8747 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8748 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8749 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8752 bnx2x_netif_start(bp);
8757 #define CRC32_RESIDUAL 0xdebb20e3
8759 static int bnx2x_test_nvram(struct bnx2x *bp)
8761 static const struct {
8765 { 0, 0x14 }, /* bootstrap */
8766 { 0x14, 0xec }, /* dir */
8767 { 0x100, 0x350 }, /* manuf_info */
8768 { 0x450, 0xf0 }, /* feature_info */
8769 { 0x640, 0x64 }, /* upgrade_key_info */
8771 { 0x708, 0x70 }, /* manuf_key_info */
8776 u8 *data = (u8 *)buf;
8780 rc = bnx2x_nvram_read(bp, 0, data, 4);
8782 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8783 goto test_nvram_exit;
8786 magic = be32_to_cpu(buf[0]);
8787 if (magic != 0x669955aa) {
8788 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8790 goto test_nvram_exit;
8793 for (i = 0; nvram_tbl[i].size; i++) {
8795 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8799 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8800 goto test_nvram_exit;
8803 csum = ether_crc_le(nvram_tbl[i].size, data);
8804 if (csum != CRC32_RESIDUAL) {
8806 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8808 goto test_nvram_exit;
8816 static int bnx2x_test_intr(struct bnx2x *bp)
8818 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8821 if (!netif_running(bp->dev))
8824 config->hdr.length_6b = 0;
8825 config->hdr.offset = 0;
8826 config->hdr.client_id = BP_CL_ID(bp);
8827 config->hdr.reserved1 = 0;
8829 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8830 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8831 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8833 bp->set_mac_pending++;
8834 for (i = 0; i < 10; i++) {
8835 if (!bp->set_mac_pending)
8837 msleep_interruptible(10);
8846 static void bnx2x_self_test(struct net_device *dev,
8847 struct ethtool_test *etest, u64 *buf)
8849 struct bnx2x *bp = netdev_priv(dev);
8851 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8853 if (!netif_running(dev))
8856 /* offline tests are not suppoerted in MF mode */
8858 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8860 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8863 link_up = bp->link_vars.link_up;
8864 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8865 bnx2x_nic_load(bp, LOAD_DIAG);
8866 /* wait until link state is restored */
8867 bnx2x_wait_for_link(bp, link_up);
8869 if (bnx2x_test_registers(bp) != 0) {
8871 etest->flags |= ETH_TEST_FL_FAILED;
8873 if (bnx2x_test_memory(bp) != 0) {
8875 etest->flags |= ETH_TEST_FL_FAILED;
8877 buf[2] = bnx2x_test_loopback(bp, link_up);
8879 etest->flags |= ETH_TEST_FL_FAILED;
8881 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8882 bnx2x_nic_load(bp, LOAD_NORMAL);
8883 /* wait until link state is restored */
8884 bnx2x_wait_for_link(bp, link_up);
8886 if (bnx2x_test_nvram(bp) != 0) {
8888 etest->flags |= ETH_TEST_FL_FAILED;
8890 if (bnx2x_test_intr(bp) != 0) {
8892 etest->flags |= ETH_TEST_FL_FAILED;
8895 if (bnx2x_link_test(bp) != 0) {
8897 etest->flags |= ETH_TEST_FL_FAILED;
8899 buf[7] = bnx2x_mc_assert(bp);
8901 etest->flags |= ETH_TEST_FL_FAILED;
8903 #ifdef BNX2X_EXTRA_DEBUG
8904 bnx2x_panic_dump(bp);
8908 static const struct {
8912 #define STATS_FLAGS_PORT 1
8913 #define STATS_FLAGS_FUNC 2
8914 u8 string[ETH_GSTRING_LEN];
8915 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8916 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8917 8, STATS_FLAGS_FUNC, "rx_bytes" },
8918 { STATS_OFFSET32(error_bytes_received_hi),
8919 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8920 { STATS_OFFSET32(total_bytes_transmitted_hi),
8921 8, STATS_FLAGS_FUNC, "tx_bytes" },
8922 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8923 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8924 { STATS_OFFSET32(total_unicast_packets_received_hi),
8925 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8926 { STATS_OFFSET32(total_multicast_packets_received_hi),
8927 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8928 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8929 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8930 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8931 8, STATS_FLAGS_FUNC, "tx_packets" },
8932 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8933 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8934 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8935 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8936 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8937 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8938 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8939 8, STATS_FLAGS_PORT, "rx_align_errors" },
8940 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8941 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8942 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8943 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8944 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8945 8, STATS_FLAGS_PORT, "tx_deferred" },
8946 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8947 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8948 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8949 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8950 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8951 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8952 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8953 8, STATS_FLAGS_PORT, "rx_fragments" },
8954 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8955 8, STATS_FLAGS_PORT, "rx_jabbers" },
8956 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8957 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8958 { STATS_OFFSET32(jabber_packets_received),
8959 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8960 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8961 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8962 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8963 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8964 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8965 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8966 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8967 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8968 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8969 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8970 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8971 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8972 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8973 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8974 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8975 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8976 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8977 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8978 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8979 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8980 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8981 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8982 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8983 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8984 { STATS_OFFSET32(mac_filter_discard),
8985 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8986 { STATS_OFFSET32(no_buff_discard),
8987 4, STATS_FLAGS_FUNC, "rx_discards" },
8988 { STATS_OFFSET32(xxoverflow_discard),
8989 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8990 { STATS_OFFSET32(brb_drop_hi),
8991 8, STATS_FLAGS_PORT, "brb_discard" },
8992 { STATS_OFFSET32(brb_truncate_hi),
8993 8, STATS_FLAGS_PORT, "brb_truncate" },
8994 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8995 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8996 { STATS_OFFSET32(rx_skb_alloc_failed),
8997 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8998 /* 42 */{ STATS_OFFSET32(hw_csum_err),
8999 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9002 #define IS_NOT_E1HMF_STAT(bp, i) \
9003 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9005 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9007 struct bnx2x *bp = netdev_priv(dev);
9010 switch (stringset) {
9012 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9013 if (IS_NOT_E1HMF_STAT(bp, i))
9015 strcpy(buf + j*ETH_GSTRING_LEN,
9016 bnx2x_stats_arr[i].string);
9022 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9027 static int bnx2x_get_stats_count(struct net_device *dev)
9029 struct bnx2x *bp = netdev_priv(dev);
9030 int i, num_stats = 0;
9032 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9033 if (IS_NOT_E1HMF_STAT(bp, i))
9040 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9041 struct ethtool_stats *stats, u64 *buf)
9043 struct bnx2x *bp = netdev_priv(dev);
9044 u32 *hw_stats = (u32 *)&bp->eth_stats;
9047 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9048 if (IS_NOT_E1HMF_STAT(bp, i))
9051 if (bnx2x_stats_arr[i].size == 0) {
9052 /* skip this counter */
9057 if (bnx2x_stats_arr[i].size == 4) {
9058 /* 4-byte counter */
9059 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9063 /* 8-byte counter */
9064 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9065 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9070 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9072 struct bnx2x *bp = netdev_priv(dev);
9073 int port = BP_PORT(bp);
9076 if (!netif_running(dev))
9085 for (i = 0; i < (data * 2); i++) {
9087 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9088 bp->link_params.hw_led_mode,
9089 bp->link_params.chip_id);
9091 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9092 bp->link_params.hw_led_mode,
9093 bp->link_params.chip_id);
9095 msleep_interruptible(500);
9096 if (signal_pending(current))
9100 if (bp->link_vars.link_up)
9101 bnx2x_set_led(bp, port, LED_MODE_OPER,
9102 bp->link_vars.line_speed,
9103 bp->link_params.hw_led_mode,
9104 bp->link_params.chip_id);
9109 static struct ethtool_ops bnx2x_ethtool_ops = {
9110 .get_settings = bnx2x_get_settings,
9111 .set_settings = bnx2x_set_settings,
9112 .get_drvinfo = bnx2x_get_drvinfo,
9113 .get_wol = bnx2x_get_wol,
9114 .set_wol = bnx2x_set_wol,
9115 .get_msglevel = bnx2x_get_msglevel,
9116 .set_msglevel = bnx2x_set_msglevel,
9117 .nway_reset = bnx2x_nway_reset,
9118 .get_link = ethtool_op_get_link,
9119 .get_eeprom_len = bnx2x_get_eeprom_len,
9120 .get_eeprom = bnx2x_get_eeprom,
9121 .set_eeprom = bnx2x_set_eeprom,
9122 .get_coalesce = bnx2x_get_coalesce,
9123 .set_coalesce = bnx2x_set_coalesce,
9124 .get_ringparam = bnx2x_get_ringparam,
9125 .set_ringparam = bnx2x_set_ringparam,
9126 .get_pauseparam = bnx2x_get_pauseparam,
9127 .set_pauseparam = bnx2x_set_pauseparam,
9128 .get_rx_csum = bnx2x_get_rx_csum,
9129 .set_rx_csum = bnx2x_set_rx_csum,
9130 .get_tx_csum = ethtool_op_get_tx_csum,
9131 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9132 .set_flags = bnx2x_set_flags,
9133 .get_flags = ethtool_op_get_flags,
9134 .get_sg = ethtool_op_get_sg,
9135 .set_sg = ethtool_op_set_sg,
9136 .get_tso = ethtool_op_get_tso,
9137 .set_tso = bnx2x_set_tso,
9138 .self_test_count = bnx2x_self_test_count,
9139 .self_test = bnx2x_self_test,
9140 .get_strings = bnx2x_get_strings,
9141 .phys_id = bnx2x_phys_id,
9142 .get_stats_count = bnx2x_get_stats_count,
9143 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9146 /* end of ethtool_ops */
9148 /****************************************************************************
9149 * General service functions
9150 ****************************************************************************/
9152 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9156 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9160 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9161 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9162 PCI_PM_CTRL_PME_STATUS));
9164 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9165 /* delay required during transition out of D3hot */
9170 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9174 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9176 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9179 /* No more memory access after this point until
9180 * device is brought back to D0.
9191 * net_device service functions
9194 static int bnx2x_poll(struct napi_struct *napi, int budget)
9196 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9198 struct bnx2x *bp = fp->bp;
9201 #ifdef BNX2X_STOP_ON_ERROR
9202 if (unlikely(bp->panic))
9206 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9207 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9208 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9210 bnx2x_update_fpsb_idx(fp);
9212 if (BNX2X_HAS_TX_WORK(fp))
9213 bnx2x_tx_int(fp, budget);
9215 if (BNX2X_HAS_RX_WORK(fp))
9216 work_done = bnx2x_rx_int(fp, budget);
9218 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9220 /* must not complete if we consumed full budget */
9221 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9223 #ifdef BNX2X_STOP_ON_ERROR
9226 netif_rx_complete(bp->dev, napi);
9228 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9229 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9230 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9231 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9237 /* we split the first BD into headers and data BDs
9238 * to ease the pain of our fellow micocode engineers
9239 * we use one mapping for both BDs
9240 * So far this has only been observed to happen
9241 * in Other Operating Systems(TM)
9243 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9244 struct bnx2x_fastpath *fp,
9245 struct eth_tx_bd **tx_bd, u16 hlen,
9246 u16 bd_prod, int nbd)
9248 struct eth_tx_bd *h_tx_bd = *tx_bd;
9249 struct eth_tx_bd *d_tx_bd;
9251 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9253 /* first fix first BD */
9254 h_tx_bd->nbd = cpu_to_le16(nbd);
9255 h_tx_bd->nbytes = cpu_to_le16(hlen);
9257 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9258 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9259 h_tx_bd->addr_lo, h_tx_bd->nbd);
9261 /* now get a new data BD
9262 * (after the pbd) and fill it */
9263 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9264 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9266 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9267 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9269 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9270 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9271 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9273 /* this marks the BD as one that has no individual mapping
9274 * the FW ignores this flag in a BD not marked start
9276 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9277 DP(NETIF_MSG_TX_QUEUED,
9278 "TSO split data size is %d (%x:%x)\n",
9279 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9281 /* update tx_bd for marking the last BD flag */
9287 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9290 csum = (u16) ~csum_fold(csum_sub(csum,
9291 csum_partial(t_header - fix, fix, 0)));
9294 csum = (u16) ~csum_fold(csum_add(csum,
9295 csum_partial(t_header, -fix, 0)));
9297 return swab16(csum);
9300 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9304 if (skb->ip_summed != CHECKSUM_PARTIAL)
9308 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9310 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9311 rc |= XMIT_CSUM_TCP;
9315 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9316 rc |= XMIT_CSUM_TCP;
9320 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9323 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9329 /* check if packet requires linearization (packet is too fragmented) */
9330 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9335 int first_bd_sz = 0;
9337 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9338 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9340 if (xmit_type & XMIT_GSO) {
9341 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9342 /* Check if LSO packet needs to be copied:
9343 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9344 int wnd_size = MAX_FETCH_BD - 3;
9345 /* Number of widnows to check */
9346 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9351 /* Headers length */
9352 hlen = (int)(skb_transport_header(skb) - skb->data) +
9355 /* Amount of data (w/o headers) on linear part of SKB*/
9356 first_bd_sz = skb_headlen(skb) - hlen;
9358 wnd_sum = first_bd_sz;
9360 /* Calculate the first sum - it's special */
9361 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9363 skb_shinfo(skb)->frags[frag_idx].size;
9365 /* If there was data on linear skb data - check it */
9366 if (first_bd_sz > 0) {
9367 if (unlikely(wnd_sum < lso_mss)) {
9372 wnd_sum -= first_bd_sz;
9375 /* Others are easier: run through the frag list and
9376 check all windows */
9377 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9379 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9381 if (unlikely(wnd_sum < lso_mss)) {
9386 skb_shinfo(skb)->frags[wnd_idx].size;
9390 /* in non-LSO too fragmented packet should always
9397 if (unlikely(to_copy))
9398 DP(NETIF_MSG_TX_QUEUED,
9399 "Linearization IS REQUIRED for %s packet. "
9400 "num_frags %d hlen %d first_bd_sz %d\n",
9401 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9402 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9407 /* called with netif_tx_lock
9408 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9409 * netif_wake_queue()
9411 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9413 struct bnx2x *bp = netdev_priv(dev);
9414 struct bnx2x_fastpath *fp;
9415 struct sw_tx_bd *tx_buf;
9416 struct eth_tx_bd *tx_bd;
9417 struct eth_tx_parse_bd *pbd = NULL;
9418 u16 pkt_prod, bd_prod;
9421 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9422 int vlan_off = (bp->e1hov ? 4 : 0);
9426 #ifdef BNX2X_STOP_ON_ERROR
9427 if (unlikely(bp->panic))
9428 return NETDEV_TX_BUSY;
9431 fp_index = (smp_processor_id() % bp->num_queues);
9432 fp = &bp->fp[fp_index];
9434 if (unlikely(bnx2x_tx_avail(bp->fp) <
9435 (skb_shinfo(skb)->nr_frags + 3))) {
9436 bp->eth_stats.driver_xoff++,
9437 netif_stop_queue(dev);
9438 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9439 return NETDEV_TX_BUSY;
9442 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9443 " gso type %x xmit_type %x\n",
9444 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9445 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9447 /* First, check if we need to linearaize the skb
9448 (due to FW restrictions) */
9449 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9450 /* Statistics of linearization */
9452 if (skb_linearize(skb) != 0) {
9453 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9454 "silently dropping this SKB\n");
9455 dev_kfree_skb_any(skb);
9456 return NETDEV_TX_OK;
9461 Please read carefully. First we use one BD which we mark as start,
9462 then for TSO or xsum we have a parsing info BD,
9463 and only then we have the rest of the TSO BDs.
9464 (don't forget to mark the last one as last,
9465 and to unmap only AFTER you write to the BD ...)
9466 And above all, all pdb sizes are in words - NOT DWORDS!
9469 pkt_prod = fp->tx_pkt_prod++;
9470 bd_prod = TX_BD(fp->tx_bd_prod);
9472 /* get a tx_buf and first BD */
9473 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9474 tx_bd = &fp->tx_desc_ring[bd_prod];
9476 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9477 tx_bd->general_data = (UNICAST_ADDRESS <<
9478 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9479 tx_bd->general_data |= 1; /* header nbd */
9481 /* remember the first BD of the packet */
9482 tx_buf->first_bd = fp->tx_bd_prod;
9485 DP(NETIF_MSG_TX_QUEUED,
9486 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9487 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9489 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9490 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9491 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9494 tx_bd->vlan = cpu_to_le16(pkt_prod);
9498 /* turn on parsing and get a BD */
9499 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9500 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9502 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9505 if (xmit_type & XMIT_CSUM) {
9506 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9508 /* for now NS flag is not used in Linux */
9509 pbd->global_data = (hlen |
9510 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9511 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9513 pbd->ip_hlen = (skb_transport_header(skb) -
9514 skb_network_header(skb)) / 2;
9516 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9518 pbd->total_hlen = cpu_to_le16(hlen);
9519 hlen = hlen*2 - vlan_off;
9521 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9523 if (xmit_type & XMIT_CSUM_V4)
9524 tx_bd->bd_flags.as_bitfield |=
9525 ETH_TX_BD_FLAGS_IP_CSUM;
9527 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9529 if (xmit_type & XMIT_CSUM_TCP) {
9530 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9533 s8 fix = SKB_CS_OFF(skb); /* signed! */
9535 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9536 pbd->cs_offset = fix / 2;
9538 DP(NETIF_MSG_TX_QUEUED,
9539 "hlen %d offset %d fix %d csum before fix %x\n",
9540 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9543 /* HW bug: fixup the CSUM */
9544 pbd->tcp_pseudo_csum =
9545 bnx2x_csum_fix(skb_transport_header(skb),
9548 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9549 pbd->tcp_pseudo_csum);
9553 mapping = pci_map_single(bp->pdev, skb->data,
9554 skb_headlen(skb), PCI_DMA_TODEVICE);
9556 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9557 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9558 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9559 tx_bd->nbd = cpu_to_le16(nbd);
9560 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9562 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9563 " nbytes %d flags %x vlan %x\n",
9564 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9565 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9566 le16_to_cpu(tx_bd->vlan));
9568 if (xmit_type & XMIT_GSO) {
9570 DP(NETIF_MSG_TX_QUEUED,
9571 "TSO packet len %d hlen %d total len %d tso size %d\n",
9572 skb->len, hlen, skb_headlen(skb),
9573 skb_shinfo(skb)->gso_size);
9575 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9577 if (unlikely(skb_headlen(skb) > hlen))
9578 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9581 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9582 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9583 pbd->tcp_flags = pbd_tcp_flags(skb);
9585 if (xmit_type & XMIT_GSO_V4) {
9586 pbd->ip_id = swab16(ip_hdr(skb)->id);
9587 pbd->tcp_pseudo_csum =
9588 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9590 0, IPPROTO_TCP, 0));
9593 pbd->tcp_pseudo_csum =
9594 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9595 &ipv6_hdr(skb)->daddr,
9596 0, IPPROTO_TCP, 0));
9598 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9601 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9604 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9605 tx_bd = &fp->tx_desc_ring[bd_prod];
9607 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9608 frag->size, PCI_DMA_TODEVICE);
9610 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9611 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9612 tx_bd->nbytes = cpu_to_le16(frag->size);
9613 tx_bd->vlan = cpu_to_le16(pkt_prod);
9614 tx_bd->bd_flags.as_bitfield = 0;
9616 DP(NETIF_MSG_TX_QUEUED,
9617 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9618 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9619 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9622 /* now at last mark the BD as the last BD */
9623 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9625 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9626 tx_bd, tx_bd->bd_flags.as_bitfield);
9628 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9630 /* now send a tx doorbell, counting the next BD
9631 * if the packet contains or ends with it
9633 if (TX_BD_POFF(bd_prod) < nbd)
9637 DP(NETIF_MSG_TX_QUEUED,
9638 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9639 " tcp_flags %x xsum %x seq %u hlen %u\n",
9640 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9641 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9642 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9644 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9646 fp->hw_tx_prods->bds_prod =
9647 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9648 mb(); /* FW restriction: must not reorder writing nbd and packets */
9649 fp->hw_tx_prods->packets_prod =
9650 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9651 DOORBELL(bp, FP_IDX(fp), 0);
9655 fp->tx_bd_prod += nbd;
9656 dev->trans_start = jiffies;
9658 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9659 netif_stop_queue(dev);
9660 bp->eth_stats.driver_xoff++;
9661 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9662 netif_wake_queue(dev);
9666 return NETDEV_TX_OK;
9669 /* called with rtnl_lock */
9670 static int bnx2x_open(struct net_device *dev)
9672 struct bnx2x *bp = netdev_priv(dev);
9674 bnx2x_set_power_state(bp, PCI_D0);
9676 return bnx2x_nic_load(bp, LOAD_OPEN);
9679 /* called with rtnl_lock */
9680 static int bnx2x_close(struct net_device *dev)
9682 struct bnx2x *bp = netdev_priv(dev);
9684 /* Unload the driver, release IRQs */
9685 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9686 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9687 if (!CHIP_REV_IS_SLOW(bp))
9688 bnx2x_set_power_state(bp, PCI_D3hot);
9693 /* called with netif_tx_lock from set_multicast */
9694 static void bnx2x_set_rx_mode(struct net_device *dev)
9696 struct bnx2x *bp = netdev_priv(dev);
9697 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9698 int port = BP_PORT(bp);
9700 if (bp->state != BNX2X_STATE_OPEN) {
9701 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9705 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9707 if (dev->flags & IFF_PROMISC)
9708 rx_mode = BNX2X_RX_MODE_PROMISC;
9710 else if ((dev->flags & IFF_ALLMULTI) ||
9711 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9712 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9714 else { /* some multicasts */
9715 if (CHIP_IS_E1(bp)) {
9717 struct dev_mc_list *mclist;
9718 struct mac_configuration_cmd *config =
9719 bnx2x_sp(bp, mcast_config);
9721 for (i = 0, mclist = dev->mc_list;
9722 mclist && (i < dev->mc_count);
9723 i++, mclist = mclist->next) {
9725 config->config_table[i].
9726 cam_entry.msb_mac_addr =
9727 swab16(*(u16 *)&mclist->dmi_addr[0]);
9728 config->config_table[i].
9729 cam_entry.middle_mac_addr =
9730 swab16(*(u16 *)&mclist->dmi_addr[2]);
9731 config->config_table[i].
9732 cam_entry.lsb_mac_addr =
9733 swab16(*(u16 *)&mclist->dmi_addr[4]);
9734 config->config_table[i].cam_entry.flags =
9736 config->config_table[i].
9737 target_table_entry.flags = 0;
9738 config->config_table[i].
9739 target_table_entry.client_id = 0;
9740 config->config_table[i].
9741 target_table_entry.vlan_id = 0;
9744 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9745 config->config_table[i].
9746 cam_entry.msb_mac_addr,
9747 config->config_table[i].
9748 cam_entry.middle_mac_addr,
9749 config->config_table[i].
9750 cam_entry.lsb_mac_addr);
9752 old = config->hdr.length_6b;
9754 for (; i < old; i++) {
9755 if (CAM_IS_INVALID(config->
9757 i--; /* already invalidated */
9761 CAM_INVALIDATE(config->
9766 if (CHIP_REV_IS_SLOW(bp))
9767 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9769 offset = BNX2X_MAX_MULTICAST*(1 + port);
9771 config->hdr.length_6b = i;
9772 config->hdr.offset = offset;
9773 config->hdr.client_id = BP_CL_ID(bp);
9774 config->hdr.reserved1 = 0;
9776 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9777 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9778 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9781 /* Accept one or more multicasts */
9782 struct dev_mc_list *mclist;
9783 u32 mc_filter[MC_HASH_SIZE];
9784 u32 crc, bit, regidx;
9787 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9789 for (i = 0, mclist = dev->mc_list;
9790 mclist && (i < dev->mc_count);
9791 i++, mclist = mclist->next) {
9793 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9794 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9795 mclist->dmi_addr[0], mclist->dmi_addr[1],
9796 mclist->dmi_addr[2], mclist->dmi_addr[3],
9797 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9799 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9800 bit = (crc >> 24) & 0xff;
9803 mc_filter[regidx] |= (1 << bit);
9806 for (i = 0; i < MC_HASH_SIZE; i++)
9807 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9812 bp->rx_mode = rx_mode;
9813 bnx2x_set_storm_rx_mode(bp);
9816 /* called with rtnl_lock */
9817 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9819 struct sockaddr *addr = p;
9820 struct bnx2x *bp = netdev_priv(dev);
9822 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9825 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9826 if (netif_running(dev)) {
9828 bnx2x_set_mac_addr_e1(bp);
9830 bnx2x_set_mac_addr_e1h(bp);
9836 /* called with rtnl_lock */
9837 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9839 struct mii_ioctl_data *data = if_mii(ifr);
9840 struct bnx2x *bp = netdev_priv(dev);
9845 data->phy_id = bp->port.phy_addr;
9852 if (!netif_running(dev))
9855 mutex_lock(&bp->port.phy_mutex);
9856 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9857 DEFAULT_PHY_DEV_ADDR,
9858 (data->reg_num & 0x1f), &mii_regval);
9859 data->val_out = mii_regval;
9860 mutex_unlock(&bp->port.phy_mutex);
9865 if (!capable(CAP_NET_ADMIN))
9868 if (!netif_running(dev))
9871 mutex_lock(&bp->port.phy_mutex);
9872 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9873 DEFAULT_PHY_DEV_ADDR,
9874 (data->reg_num & 0x1f), data->val_in);
9875 mutex_unlock(&bp->port.phy_mutex);
9886 /* called with rtnl_lock */
9887 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9889 struct bnx2x *bp = netdev_priv(dev);
9892 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9893 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9896 /* This does not race with packet allocation
9897 * because the actual alloc size is
9898 * only updated as part of load
9902 if (netif_running(dev)) {
9903 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9904 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9910 static void bnx2x_tx_timeout(struct net_device *dev)
9912 struct bnx2x *bp = netdev_priv(dev);
9914 #ifdef BNX2X_STOP_ON_ERROR
9918 /* This allows the netif to be shutdown gracefully before resetting */
9919 schedule_work(&bp->reset_task);
9923 /* called with rtnl_lock */
9924 static void bnx2x_vlan_rx_register(struct net_device *dev,
9925 struct vlan_group *vlgrp)
9927 struct bnx2x *bp = netdev_priv(dev);
9930 if (netif_running(dev))
9931 bnx2x_set_client_config(bp);
9936 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9937 static void poll_bnx2x(struct net_device *dev)
9939 struct bnx2x *bp = netdev_priv(dev);
9941 disable_irq(bp->pdev->irq);
9942 bnx2x_interrupt(bp->pdev->irq, dev);
9943 enable_irq(bp->pdev->irq);
9947 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9948 struct net_device *dev)
9953 SET_NETDEV_DEV(dev, &pdev->dev);
9954 bp = netdev_priv(dev);
9959 bp->func = PCI_FUNC(pdev->devfn);
9961 rc = pci_enable_device(pdev);
9963 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9967 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9968 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9971 goto err_out_disable;
9974 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9975 printk(KERN_ERR PFX "Cannot find second PCI device"
9976 " base address, aborting\n");
9978 goto err_out_disable;
9981 if (atomic_read(&pdev->enable_cnt) == 1) {
9982 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9984 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9986 goto err_out_disable;
9989 pci_set_master(pdev);
9990 pci_save_state(pdev);
9993 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9994 if (bp->pm_cap == 0) {
9995 printk(KERN_ERR PFX "Cannot find power management"
9996 " capability, aborting\n");
9998 goto err_out_release;
10001 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10002 if (bp->pcie_cap == 0) {
10003 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10006 goto err_out_release;
10009 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10010 bp->flags |= USING_DAC_FLAG;
10011 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10012 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10013 " failed, aborting\n");
10015 goto err_out_release;
10018 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10019 printk(KERN_ERR PFX "System does not support DMA,"
10022 goto err_out_release;
10025 dev->mem_start = pci_resource_start(pdev, 0);
10026 dev->base_addr = dev->mem_start;
10027 dev->mem_end = pci_resource_end(pdev, 0);
10029 dev->irq = pdev->irq;
10031 bp->regview = ioremap_nocache(dev->base_addr,
10032 pci_resource_len(pdev, 0));
10033 if (!bp->regview) {
10034 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10036 goto err_out_release;
10039 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10040 min_t(u64, BNX2X_DB_SIZE,
10041 pci_resource_len(pdev, 2)));
10042 if (!bp->doorbells) {
10043 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10045 goto err_out_unmap;
10048 bnx2x_set_power_state(bp, PCI_D0);
10050 /* clean indirect addresses */
10051 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10052 PCICFG_VENDOR_ID_OFFSET);
10053 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10054 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10055 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10056 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10058 dev->hard_start_xmit = bnx2x_start_xmit;
10059 dev->watchdog_timeo = TX_TIMEOUT;
10061 dev->ethtool_ops = &bnx2x_ethtool_ops;
10062 dev->open = bnx2x_open;
10063 dev->stop = bnx2x_close;
10064 dev->set_multicast_list = bnx2x_set_rx_mode;
10065 dev->set_mac_address = bnx2x_change_mac_addr;
10066 dev->do_ioctl = bnx2x_ioctl;
10067 dev->change_mtu = bnx2x_change_mtu;
10068 dev->tx_timeout = bnx2x_tx_timeout;
10070 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10072 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10073 dev->poll_controller = poll_bnx2x;
10075 dev->features |= NETIF_F_SG;
10076 dev->features |= NETIF_F_HW_CSUM;
10077 if (bp->flags & USING_DAC_FLAG)
10078 dev->features |= NETIF_F_HIGHDMA;
10080 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10082 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10083 dev->features |= NETIF_F_TSO6;
10089 iounmap(bp->regview);
10090 bp->regview = NULL;
10092 if (bp->doorbells) {
10093 iounmap(bp->doorbells);
10094 bp->doorbells = NULL;
10098 if (atomic_read(&pdev->enable_cnt) == 1)
10099 pci_release_regions(pdev);
10102 pci_disable_device(pdev);
10103 pci_set_drvdata(pdev, NULL);
10109 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10111 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10113 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10117 /* return value of 1=2.5GHz 2=5GHz */
10118 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10120 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10122 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10126 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10127 const struct pci_device_id *ent)
10129 static int version_printed;
10130 struct net_device *dev = NULL;
10133 DECLARE_MAC_BUF(mac);
10135 if (version_printed++ == 0)
10136 printk(KERN_INFO "%s", version);
10138 /* dev zeroed in init_etherdev */
10139 dev = alloc_etherdev(sizeof(*bp));
10141 printk(KERN_ERR PFX "Cannot allocate net device\n");
10145 netif_carrier_off(dev);
10147 bp = netdev_priv(dev);
10148 bp->msglevel = debug;
10150 rc = bnx2x_init_dev(pdev, dev);
10156 rc = register_netdev(dev);
10158 dev_err(&pdev->dev, "Cannot register net device\n");
10159 goto init_one_exit;
10162 pci_set_drvdata(pdev, dev);
10164 rc = bnx2x_init_bp(bp);
10166 unregister_netdev(dev);
10167 goto init_one_exit;
10170 bp->common.name = board_info[ent->driver_data].name;
10171 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10172 " IRQ %d, ", dev->name, bp->common.name,
10173 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10174 bnx2x_get_pcie_width(bp),
10175 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10176 dev->base_addr, bp->pdev->irq);
10177 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10182 iounmap(bp->regview);
10185 iounmap(bp->doorbells);
10189 if (atomic_read(&pdev->enable_cnt) == 1)
10190 pci_release_regions(pdev);
10192 pci_disable_device(pdev);
10193 pci_set_drvdata(pdev, NULL);
10198 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10200 struct net_device *dev = pci_get_drvdata(pdev);
10204 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10207 bp = netdev_priv(dev);
10209 unregister_netdev(dev);
10212 iounmap(bp->regview);
10215 iounmap(bp->doorbells);
10219 if (atomic_read(&pdev->enable_cnt) == 1)
10220 pci_release_regions(pdev);
10222 pci_disable_device(pdev);
10223 pci_set_drvdata(pdev, NULL);
10226 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10228 struct net_device *dev = pci_get_drvdata(pdev);
10232 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10235 bp = netdev_priv(dev);
10239 pci_save_state(pdev);
10241 if (!netif_running(dev)) {
10246 netif_device_detach(dev);
10248 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10250 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10257 static int bnx2x_resume(struct pci_dev *pdev)
10259 struct net_device *dev = pci_get_drvdata(pdev);
10264 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10267 bp = netdev_priv(dev);
10271 pci_restore_state(pdev);
10273 if (!netif_running(dev)) {
10278 bnx2x_set_power_state(bp, PCI_D0);
10279 netif_device_attach(dev);
10281 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10289 * bnx2x_io_error_detected - called when PCI error is detected
10290 * @pdev: Pointer to PCI device
10291 * @state: The current pci connection state
10293 * This function is called after a PCI bus error affecting
10294 * this device has been detected.
10296 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10297 pci_channel_state_t state)
10299 struct net_device *dev = pci_get_drvdata(pdev);
10300 struct bnx2x *bp = netdev_priv(dev);
10304 netif_device_detach(dev);
10306 if (netif_running(dev))
10307 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10309 pci_disable_device(pdev);
10313 /* Request a slot reset */
10314 return PCI_ERS_RESULT_NEED_RESET;
10318 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10319 * @pdev: Pointer to PCI device
10321 * Restart the card from scratch, as if from a cold-boot.
10323 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10325 struct net_device *dev = pci_get_drvdata(pdev);
10326 struct bnx2x *bp = netdev_priv(dev);
10330 if (pci_enable_device(pdev)) {
10331 dev_err(&pdev->dev,
10332 "Cannot re-enable PCI device after reset\n");
10334 return PCI_ERS_RESULT_DISCONNECT;
10337 pci_set_master(pdev);
10338 pci_restore_state(pdev);
10340 if (netif_running(dev))
10341 bnx2x_set_power_state(bp, PCI_D0);
10345 return PCI_ERS_RESULT_RECOVERED;
10349 * bnx2x_io_resume - called when traffic can start flowing again
10350 * @pdev: Pointer to PCI device
10352 * This callback is called when the error recovery driver tells us that
10353 * its OK to resume normal operation.
10355 static void bnx2x_io_resume(struct pci_dev *pdev)
10357 struct net_device *dev = pci_get_drvdata(pdev);
10358 struct bnx2x *bp = netdev_priv(dev);
10362 if (netif_running(dev))
10363 bnx2x_nic_load(bp, LOAD_OPEN);
10365 netif_device_attach(dev);
10370 static struct pci_error_handlers bnx2x_err_handler = {
10371 .error_detected = bnx2x_io_error_detected,
10372 .slot_reset = bnx2x_io_slot_reset,
10373 .resume = bnx2x_io_resume,
10376 static struct pci_driver bnx2x_pci_driver = {
10377 .name = DRV_MODULE_NAME,
10378 .id_table = bnx2x_pci_tbl,
10379 .probe = bnx2x_init_one,
10380 .remove = __devexit_p(bnx2x_remove_one),
10381 .suspend = bnx2x_suspend,
10382 .resume = bnx2x_resume,
10383 .err_handler = &bnx2x_err_handler,
10386 static int __init bnx2x_init(void)
10388 return pci_register_driver(&bnx2x_pci_driver);
10391 static void __exit bnx2x_cleanup(void)
10393 pci_unregister_driver(&bnx2x_pci_driver);
10396 module_init(bnx2x_init);
10397 module_exit(bnx2x_cleanup);