/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
	#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static int use_multi;
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
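/* Illustrative note: the pair above implements the classic config-space
 * GRC window - write the target offset to PCICFG_GRC_ADDRESS, then move
 * data through PCICFG_GRC_DATA. For example (arguments are arbitrary):
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, addr);	/* addr: any GRC offset */
 *
 * Restoring the window to PCICFG_VENDOR_ID_OFFSET afterwards keeps a
 * stale window from affecting later config cycles.
 */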
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
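/* The DMAE block exposes 16 command cells; a command is staged dword by
 * dword into DMAE_REG_CMD_MEM and then kicked by writing 1 to the
 * matching GO register from dmae_reg_go_c[] above. */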
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
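/* Usage sketch (illustration only; the argument values are assumptions):
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_addr, 4);
 *
 * pushes four dwords prepared in the slowpath write-back area to a GRC
 * destination. Completion is detected by polling the wb_comp dword for
 * DMAE_COMP_VAL rather than by taking an interrupt.
 */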
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
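/* Wide-bus registers are 64 bits wide and only reachable through DMAE as
 * a {hi, lo} dword pair; HILO_U64() reassembles the pair returned by
 * REG_RD_DMAE() into a single u64. */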
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
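/* Each assert-list entry is four dwords (row0..row3), and scanning stops
 * at the first entry whose row0 reads COMMON_ASM_INVALID_ASSERT_OPCODE;
 * the same layout repeats in the X/T/C/U storm internal memories walked
 * above. */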
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
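/* The dump windows above start 10 entries before each consumer and run
 * to roughly one ring page past it, so the descriptors surrounding the
 * point of failure are always visible in the log. */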
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
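/* Usage sketch (illustration only; argument choice is an assumption):
 *
 *	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, fp->fp_u_idx,
 *		     IGU_INT_ENABLE, 1);
 *
 * 'op' selects the interrupt mode (enable/disable/NOP) and 'update'
 * tells the IGU to latch the new status-block index; the whole ack is
 * packed into one dword and written to the per-function IGU mailbox.
 */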
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}
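/* The value read from the SIMD mask aggregates the pending status
 * blocks: bit 0 stands for the default (slowpath) status block and
 * bit (1 + sb_id) for each fastpath one - see the "0x2 << sb_id" and
 * "status & 0x1" tests in bnx2x_interrupt() below. */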
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
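/* Accounting example: when prod == cons the ring is empty, yet "used" is
 * already NUM_TX_RINGS because the "next page" BDs can never carry
 * packets; the value returned is therefore at most
 * tx_ring_size - NUM_TX_RINGS. */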
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
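/* Note on the dispatch above: the ramrod command and the SW state are
 * OR-ed into a single switch key, which assumes the command IDs and the
 * BNX2X_*STATE* values occupy disjoint bit ranges. */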
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
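/* Mask bookkeeping example: every SGE page contributes RX_SGE_CNT
 * indices, the last two of which are "next page" pointers. Their mask
 * bits are cleared up front so that bnx2x_update_sge_prod() never hands
 * them back to the FW as usable buffers. */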
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
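/* The three producers are staged in one tstorm_eth_rx_producers struct
 * and copied out dword by dword, so the FW sees bd, cqe and sge
 * producers that are consistent with one another. */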
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad, queue;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
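/* Typical usage (illustration only):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch a resource shared between the two ports/functions ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * Writing the resource bit to control_reg + 4 requests the lock and the
 * read-back shows whether it was granted; writing the same bit to the
 * base register (in the release path below) drops it.
 */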
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
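/* MISC_REG_GPIO packs per-pin FLOAT/SET/CLR bit-fields: the read above
 * keeps only the float bits of the other pins, then exactly one of SET,
 * CLR or FLOAT is raised for the requested pin, so driving a pin
 * implicitly takes it out of the Hi-Z state. */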
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
	 In the latter case the fairness algorithm should be deactivated.
	 If not all min_rates are zero then those that are zeroes will
	 be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		return 0;

	return wsum;
}
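/* Worked example (values are assumptions for illustration): with two
 * visible vNICs whose configured min-BW fields are 25 and 75, vn_min_rate
 * evaluates to 2500 and 7500, so wsum = 10000 is returned. Had both
 * fields been 0, each would be counted as DEF_MIN_RATE but all_zero
 * would stay set and 0 would be returned, deactivating fairness. */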
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer in-accuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2217 /* This function is called upon link interrupt */
2218 static void bnx2x_link_attn(struct bnx2x *bp)
2222 /* Make sure that we are synced with the current statistics */
2223 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2225 bnx2x_acquire_phy_lock(bp);
2226 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2227 bnx2x_release_phy_lock(bp);
2229 if (bp->link_vars.link_up) {
2231 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232 struct host_port_stats *pstats;
2234 pstats = bnx2x_sp(bp, port_stats);
2235 /* reset old bmac stats */
2236 memset(&(pstats->mac_stx[0]), 0,
2237 sizeof(struct mac_stx));
2239 if ((bp->state == BNX2X_STATE_OPEN) ||
2240 (bp->state == BNX2X_STATE_DISABLED))
2241 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2244 /* indicate link status */
2245 bnx2x_link_report(bp);
2250 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251 if (vn == BP_E1HVN(bp))
2254 func = ((vn << 1) | BP_PORT(bp));
2256 /* Set the attention towards other drivers
2257 on the same port */
2258 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2263 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264 struct cmng_struct_per_port m_cmng_port;
2266 int port = BP_PORT(bp);
2268 /* Init RATE SHAPING and FAIRNESS contexts */
2269 wsum = bnx2x_calc_vn_wsum(bp);
2270 bnx2x_init_port_minmax(bp, (int)wsum,
2271 bp->link_vars.line_speed,
2274 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276 wsum, bp->link_vars.line_speed,
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2283 if (bp->state != BNX2X_STATE_OPEN)
2286 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2288 if (bp->link_vars.link_up)
2289 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2299 int port = BP_PORT(bp);
2303 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305 /* enable nig attention */
2306 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2310 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2318 * General service functions
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323 u32 data_hi, u32 data_lo, int common)
2325 int func = BP_FUNC(bp);
2327 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2329 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2333 #ifdef BNX2X_STOP_ON_ERROR
2334 if (unlikely(bp->panic))
2338 spin_lock_bh(&bp->spq_lock);
2340 if (!bp->spq_left) {
2341 BNX2X_ERR("BUG! SPQ ring full!\n");
2342 spin_unlock_bh(&bp->spq_lock);
2347 /* CID needs port number to be encoded in it */
2348 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2351 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2353 bp->spq_prod_bd->hdr.type |=
2354 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2356 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2361 if (bp->spq_prod_bd == bp->spq_last_bd) {
2362 bp->spq_prod_bd = bp->spq;
2363 bp->spq_prod_idx = 0;
2364 DP(NETIF_MSG_TIMER, "end of spq\n");
2371 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2374 spin_unlock_bh(&bp->spq_lock);
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2386 for (j = 0; j < i*10; j++) {
2388 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390 if (val & (1L << 31))
2395 if (!(val & (1L << 31))) {
2396 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2413 struct host_def_status_block *def_sb = bp->def_status_blk;
2416 barrier(); /* status block is written to by the chip */
2417 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2421 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2425 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2429 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2433 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2441 * slow path service functions
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2446 int port = BP_PORT(bp);
2447 int func = BP_FUNC(bp);
2448 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2449 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452 NIG_REG_MASK_INTERRUPT_PORT0;
2454 if (~bp->aeu_mask & (asserted & 0xff))
2455 BNX2X_ERR("IGU ERROR\n");
2456 if (bp->attn_state & asserted)
2457 BNX2X_ERR("IGU ERROR\n");
2459 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2460 bp->aeu_mask, asserted);
2461 bp->aeu_mask &= ~(asserted & 0xff);
2462 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2464 REG_WR(bp, aeu_addr, bp->aeu_mask);
2466 bp->attn_state |= asserted;
2468 if (asserted & ATTN_HARD_WIRED_MASK) {
2469 if (asserted & ATTN_NIG_FOR_FUNC) {
2471 /* save nig interrupt mask */
2472 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2473 REG_WR(bp, nig_int_mask_addr, 0);
2475 bnx2x_link_attn(bp);
2477 /* handle unicore attn? */
2479 if (asserted & ATTN_SW_TIMER_4_FUNC)
2480 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2482 if (asserted & GPIO_2_FUNC)
2483 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2485 if (asserted & GPIO_3_FUNC)
2486 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2488 if (asserted & GPIO_4_FUNC)
2489 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2492 if (asserted & ATTN_GENERAL_ATTN_1) {
2493 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2494 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2496 if (asserted & ATTN_GENERAL_ATTN_2) {
2497 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2498 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2500 if (asserted & ATTN_GENERAL_ATTN_3) {
2501 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2502 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2505 if (asserted & ATTN_GENERAL_ATTN_4) {
2506 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2509 if (asserted & ATTN_GENERAL_ATTN_5) {
2510 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2511 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2513 if (asserted & ATTN_GENERAL_ATTN_6) {
2514 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2515 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2519 } /* if hardwired */
2521 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2522 asserted, BAR_IGU_INTMEM + igu_addr);
2523 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2525 /* now set back the mask */
2526 if (asserted & ATTN_NIG_FOR_FUNC)
2527 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2530 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2532 int port = BP_PORT(bp);
2536 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2537 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2539 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2541 val = REG_RD(bp, reg_offset);
2542 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2543 REG_WR(bp, reg_offset, val);
2545 BNX2X_ERR("SPIO5 hw attention\n");
2547 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2548 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2549 /* Fan failure attention */
2551 /* The PHY reset is controlled by GPIO 1 */
2552 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2553 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2554 /* Low power mode is controlled by GPIO 2 */
2555 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2556 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2557 /* mark the failure */
2558 bp->link_params.ext_phy_config &=
2559 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2560 bp->link_params.ext_phy_config |=
2561 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2563 dev_info.port_hw_config[port].
2564 external_phy_config,
2565 bp->link_params.ext_phy_config);
2566 /* log the failure */
2567 printk(KERN_ERR PFX "Fan Failure on Network"
2568 " Controller %s has caused the driver to"
2569 " shutdown the card to prevent permanent"
2570 " damage. Please contact Dell Support for"
2571 " assistance\n", bp->dev->name);
2579 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2581 val = REG_RD(bp, reg_offset);
2582 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2583 REG_WR(bp, reg_offset, val);
2585 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2586 (attn & HW_INTERRUT_ASSERT_SET_0));
2591 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2595 if (attn & BNX2X_DOORQ_ASSERT) {
2597 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2598 BNX2X_ERR("DB hw attention 0x%x\n", val);
2599 /* DORQ discard attention */
2601 BNX2X_ERR("FATAL error from DORQ\n");
2604 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2606 int port = BP_PORT(bp);
2609 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2610 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2612 val = REG_RD(bp, reg_offset);
2613 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2614 REG_WR(bp, reg_offset, val);
2616 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2617 (attn & HW_INTERRUT_ASSERT_SET_1));
2622 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2626 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2628 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2629 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2630 /* CFC error attention */
2632 BNX2X_ERR("FATAL error from CFC\n");
2635 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2637 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2638 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2639 /* RQ_USDMDP_FIFO_OVERFLOW */
2641 BNX2X_ERR("FATAL error from PXP\n");
2644 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2646 int port = BP_PORT(bp);
2649 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2650 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2652 val = REG_RD(bp, reg_offset);
2653 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2654 REG_WR(bp, reg_offset, val);
2656 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2657 (attn & HW_INTERRUT_ASSERT_SET_2));
2662 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2666 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2668 if (attn & BNX2X_PMF_LINK_ASSERT) {
2669 int func = BP_FUNC(bp);
2671 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2672 bnx2x__link_status_update(bp);
2673 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2675 bnx2x_pmf_update(bp);
2677 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2679 BNX2X_ERR("MC assert!\n");
2680 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2681 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2682 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2686 } else if (attn & BNX2X_MCP_ASSERT) {
2688 BNX2X_ERR("MCP assert!\n");
2689 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2693 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2696 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2697 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2698 if (attn & BNX2X_GRC_TIMEOUT) {
2699 val = CHIP_IS_E1H(bp) ?
2700 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2701 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2703 if (attn & BNX2X_GRC_RSV) {
2704 val = CHIP_IS_E1H(bp) ?
2705 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2706 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2708 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2712 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2714 struct attn_route attn;
2715 struct attn_route group_mask;
2716 int port = BP_PORT(bp);
2721 /* need to take HW lock because MCP or other port might also
2722 try to handle this event */
2723 bnx2x_acquire_alr(bp);
2725 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2726 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2727 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2728 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2729 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2730 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2732 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2733 if (deasserted & (1 << index)) {
2734 group_mask = bp->attn_group[index];
2736 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2737 index, group_mask.sig[0], group_mask.sig[1],
2738 group_mask.sig[2], group_mask.sig[3]);
2740 bnx2x_attn_int_deasserted3(bp,
2741 attn.sig[3] & group_mask.sig[3]);
2742 bnx2x_attn_int_deasserted1(bp,
2743 attn.sig[1] & group_mask.sig[1]);
2744 bnx2x_attn_int_deasserted2(bp,
2745 attn.sig[2] & group_mask.sig[2]);
2746 bnx2x_attn_int_deasserted0(bp,
2747 attn.sig[0] & group_mask.sig[0]);
2749 if ((attn.sig[0] & group_mask.sig[0] &
2750 HW_PRTY_ASSERT_SET_0) ||
2751 (attn.sig[1] & group_mask.sig[1] &
2752 HW_PRTY_ASSERT_SET_1) ||
2753 (attn.sig[2] & group_mask.sig[2] &
2754 HW_PRTY_ASSERT_SET_2))
2755 BNX2X_ERR("FATAL HW block parity attention\n");
2759 bnx2x_release_alr(bp);
2761 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2764 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2765 val, BAR_IGU_INTMEM + reg_addr); */
2766 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2768 if (bp->aeu_mask & (deasserted & 0xff))
2769 BNX2X_ERR("IGU BUG!\n");
2770 if (~bp->attn_state & deasserted)
2771 BNX2X_ERR("IGU BUG!\n");
2773 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2774 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2776 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2777 bp->aeu_mask |= (deasserted & 0xff);
2779 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2780 REG_WR(bp, reg_addr, bp->aeu_mask);
2782 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2783 bp->attn_state &= ~deasserted;
2784 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2787 static void bnx2x_attn_int(struct bnx2x *bp)
2789 /* read local copy of bits */
2790 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2791 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2792 u32 attn_state = bp->attn_state;
2794 /* look for changed bits */
2795 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2796 u32 deasserted = ~attn_bits & attn_ack & attn_state;
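/* Editor's example: with attn_bits = 0x2, attn_ack = 0x0 and
 * attn_state = 0x0, bit 1 shows up in 'asserted'.  Once the chip
 * acks it and the driver records it in attn_state, a later read of
 * attn_bits = 0x0 makes the same bit show up in 'deasserted'.
 */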
2799 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2800 attn_bits, attn_ack, asserted, deasserted);
2802 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2803 BNX2X_ERR("BAD attention state\n");
2805 /* handle bits that were raised */
2807 bnx2x_attn_int_asserted(bp, asserted);
2810 bnx2x_attn_int_deasserted(bp, deasserted);
2813 static void bnx2x_sp_task(struct work_struct *work)
2815 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2819 /* Return here if interrupt is disabled */
2820 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2821 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2825 status = bnx2x_update_dsb_idx(bp);
2826 /* if (status == 0) */
2827 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2829 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2835 /* CStorm events: query_stats, port delete ramrod */
2837 bp->stats_pending = 0;
2839 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2841 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2843 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2845 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2847 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2852 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2854 struct net_device *dev = dev_instance;
2855 struct bnx2x *bp = netdev_priv(dev);
2857 /* Return here if interrupt is disabled */
2858 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2859 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2863 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2865 #ifdef BNX2X_STOP_ON_ERROR
2866 if (unlikely(bp->panic))
2870 schedule_work(&bp->sp_task);
2875 /* end of slow path */
2879 /****************************************************************************
2881 ****************************************************************************/
2883 /* sum[hi:lo] += add[hi:lo] */
2884 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2885 do { \
2886 s_lo += a_lo; \
2887 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2888 } while (0)
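/* Editor's sketch of the carry, not driver code:
 *
 *	u32 s_hi = 0, s_lo = 0xffffffff;
 *	ADD_64(s_hi, 0, s_lo, 1);
 *
 * s_lo wraps to 0, (s_lo < a_lo) detects the wrap and s_hi becomes 1.
 */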
2890 /* difference = minuend - subtrahend */
2891 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2892 do { \
2893 if (m_lo < s_lo) { \
2894 /* underflow */ \
2895 d_hi = m_hi - s_hi; \
2896 if (d_hi > 0) { \
2897 /* we can 'loan' 1 */ \
2898 d_hi--; \
2899 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2900 } else { \
2901 /* m_hi <= s_hi */ \
2902 d_hi = 0; \
2903 d_lo = 0; \
2904 } \
2905 } else { \
2906 /* m_lo >= s_lo */ \
2907 if (m_hi < s_hi) { \
2908 d_hi = 0; \
2909 d_lo = 0; \
2910 } else { \
2911 /* m_hi >= s_hi */ \
2912 d_hi = m_hi - s_hi; \
2913 d_lo = m_lo - s_lo; \
2914 } \
2915 } \
2916 } while (0)
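/* Editor's sketch of the borrow, not driver code:
 *
 *	u32 d_hi, d_lo;
 *	DIFF_64(d_hi, 1, 0, d_lo, 0, 1);	(0x1:0x0 minus 0x0:0x1)
 *
 * d_hi ends up 0 and d_lo UINT_MAX: the high word 'loans' 1 to the
 * low word, and a would-be negative result saturates to 0:0.
 */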
2918 #define UPDATE_STAT64(s, t) \
2920 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2921 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2922 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2923 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2924 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2925 pstats->mac_stx[1].t##_lo, diff.lo); \
2928 #define UPDATE_STAT64_NIG(s, t) \
2930 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2931 diff.lo, new->s##_lo, old->s##_lo); \
2932 ADD_64(estats->t##_hi, diff.hi, \
2933 estats->t##_lo, diff.lo); \
2936 /* sum[hi:lo] += add */
2937 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2938 do { \
2939 s_lo += a; \
2940 s_hi += (s_lo < a) ? 1 : 0; \
2941 } while (0)
2943 #define UPDATE_EXTEND_STAT(s) \
2945 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2946 pstats->mac_stx[1].s##_lo, \
2950 #define UPDATE_EXTEND_TSTAT(s, t) \
2952 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2953 old_tclient->s = le32_to_cpu(tclient->s); \
2954 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2957 #define UPDATE_EXTEND_XSTAT(s, t) \
2959 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2960 old_xclient->s = le32_to_cpu(xclient->s); \
2961 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2965 * General service functions
2968 static inline long bnx2x_hilo(u32 *hiref)
2969 {
2970 u32 lo = *(hiref + 1);
2971 #if (BITS_PER_LONG == 64)
2972 u32 hi = *hiref;
2974 return HILO_U64(hi, lo);
2975 #else
2976 return lo;
2977 #endif
2978 }
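/* Editor's note: the storm statistics keep every 64-bit counter as a
 * hi:lo pair of u32 with the high word first, so hiref points at hi
 * and hiref + 1 at lo.  On a 32-bit kernel only the low word fits in
 * a long, hence the truncating #else branch.
 */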
2981 * Init service functions
2984 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2986 if (!bp->stats_pending) {
2987 struct eth_query_ramrod_data ramrod_data = {0};
2990 ramrod_data.drv_counter = bp->stats_counter++;
2991 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2992 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2994 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2995 ((u32 *)&ramrod_data)[1],
2996 ((u32 *)&ramrod_data)[0], 0);
2998 /* stats ramrod has its own slot on the spq */
3000 bp->stats_pending = 1;
3005 static void bnx2x_stats_init(struct bnx2x *bp)
3007 int port = BP_PORT(bp);
3009 bp->executer_idx = 0;
3010 bp->stats_counter = 0;
3014 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3016 bp->port.port_stx = 0;
3017 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3019 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3020 bp->port.old_nig_stats.brb_discard =
3021 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3022 bp->port.old_nig_stats.brb_truncate =
3023 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3024 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3025 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3026 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3027 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3029 /* function stats */
3030 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3031 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3032 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3033 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3035 bp->stats_state = STATS_STATE_DISABLED;
3036 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3037 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3040 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3042 struct dmae_command *dmae = &bp->stats_dmae;
3043 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3045 *stats_comp = DMAE_COMP_VAL;
3048 if (bp->executer_idx) {
3049 int loader_idx = PMF_DMAE_C(bp);
3051 memset(dmae, 0, sizeof(struct dmae_command));
3053 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3054 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3055 DMAE_CMD_DST_RESET |
3057 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3059 DMAE_CMD_ENDIANITY_DW_SWAP |
3061 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3063 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3064 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3065 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3066 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3067 sizeof(struct dmae_command) *
3068 (loader_idx + 1)) >> 2;
3069 dmae->dst_addr_hi = 0;
3070 dmae->len = sizeof(struct dmae_command) >> 2;
3073 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3074 dmae->comp_addr_hi = 0;
3078 bnx2x_post_dmae(bp, dmae, loader_idx);
3080 } else if (bp->func_stx) {
3082 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3086 static int bnx2x_stats_comp(struct bnx2x *bp)
3088 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3092 while (*stats_comp != DMAE_COMP_VAL) {
3095 BNX2X_ERR("timeout waiting for stats finished\n");
3104 * Statistics service functions
3107 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3109 struct dmae_command *dmae;
3111 int loader_idx = PMF_DMAE_C(bp);
3112 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3115 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3116 BNX2X_ERR("BUG!\n");
3120 bp->executer_idx = 0;
3122 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3124 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3126 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3128 DMAE_CMD_ENDIANITY_DW_SWAP |
3130 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3131 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3133 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3134 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3135 dmae->src_addr_lo = bp->port.port_stx >> 2;
3136 dmae->src_addr_hi = 0;
3137 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3138 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3139 dmae->len = DMAE_LEN32_RD_MAX;
3140 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3141 dmae->comp_addr_hi = 0;
3144 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3145 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3146 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3147 dmae->src_addr_hi = 0;
3148 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3149 DMAE_LEN32_RD_MAX * 4);
3150 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3151 DMAE_LEN32_RD_MAX * 4);
3152 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3153 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3154 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3155 dmae->comp_val = DMAE_COMP_VAL;
3158 bnx2x_hw_stats_post(bp);
3159 bnx2x_stats_comp(bp);
3162 static void bnx2x_port_stats_init(struct bnx2x *bp)
3164 struct dmae_command *dmae;
3165 int port = BP_PORT(bp);
3166 int vn = BP_E1HVN(bp);
3168 int loader_idx = PMF_DMAE_C(bp);
3170 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3173 if (!bp->link_vars.link_up || !bp->port.pmf) {
3174 BNX2X_ERR("BUG!\n");
3178 bp->executer_idx = 0;
3181 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3182 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3183 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3185 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3187 DMAE_CMD_ENDIANITY_DW_SWAP |
3189 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3190 (vn << DMAE_CMD_E1HVN_SHIFT));
3192 if (bp->port.port_stx) {
3194 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3195 dmae->opcode = opcode;
3196 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3197 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3198 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3199 dmae->dst_addr_hi = 0;
3200 dmae->len = sizeof(struct host_port_stats) >> 2;
3201 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3202 dmae->comp_addr_hi = 0;
3208 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3209 dmae->opcode = opcode;
3210 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3211 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3212 dmae->dst_addr_lo = bp->func_stx >> 2;
3213 dmae->dst_addr_hi = 0;
3214 dmae->len = sizeof(struct host_func_stats) >> 2;
3215 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3216 dmae->comp_addr_hi = 0;
3221 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3222 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3227 DMAE_CMD_ENDIANITY_DW_SWAP |
3229 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3230 (vn << DMAE_CMD_E1HVN_SHIFT));
3232 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3234 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3235 NIG_REG_INGRESS_BMAC0_MEM);
3237 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3238 BIGMAC_REGISTER_TX_STAT_GTBYT */
3239 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3240 dmae->opcode = opcode;
3241 dmae->src_addr_lo = (mac_addr +
3242 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3243 dmae->src_addr_hi = 0;
3244 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3245 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3246 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3247 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3248 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249 dmae->comp_addr_hi = 0;
3252 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3253 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3254 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3255 dmae->opcode = opcode;
3256 dmae->src_addr_lo = (mac_addr +
3257 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3258 dmae->src_addr_hi = 0;
3259 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3260 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3261 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3262 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3263 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3264 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3265 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3266 dmae->comp_addr_hi = 0;
3269 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3271 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3273 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275 dmae->opcode = opcode;
3276 dmae->src_addr_lo = (mac_addr +
3277 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3278 dmae->src_addr_hi = 0;
3279 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3280 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3281 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3282 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3283 dmae->comp_addr_hi = 0;
3286 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3287 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3288 dmae->opcode = opcode;
3289 dmae->src_addr_lo = (mac_addr +
3290 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3291 dmae->src_addr_hi = 0;
3292 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3293 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3294 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3295 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3297 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3298 dmae->comp_addr_hi = 0;
3301 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3302 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3303 dmae->opcode = opcode;
3304 dmae->src_addr_lo = (mac_addr +
3305 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3306 dmae->src_addr_hi = 0;
3307 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3308 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3309 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3310 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3311 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3312 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313 dmae->comp_addr_hi = 0;
3318 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3319 dmae->opcode = opcode;
3320 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3321 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3322 dmae->src_addr_hi = 0;
3323 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3324 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3325 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3326 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3327 dmae->comp_addr_hi = 0;
3330 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3331 dmae->opcode = opcode;
3332 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3333 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3334 dmae->src_addr_hi = 0;
3335 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3336 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3337 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3338 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3339 dmae->len = (2*sizeof(u32)) >> 2;
3340 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3341 dmae->comp_addr_hi = 0;
3344 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3345 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3346 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3347 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3349 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3351 DMAE_CMD_ENDIANITY_DW_SWAP |
3353 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3354 (vn << DMAE_CMD_E1HVN_SHIFT));
3355 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3356 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3357 dmae->src_addr_hi = 0;
3358 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3359 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3360 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3361 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3362 dmae->len = (2*sizeof(u32)) >> 2;
3363 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3364 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3365 dmae->comp_val = DMAE_COMP_VAL;
3370 static void bnx2x_func_stats_init(struct bnx2x *bp)
3372 struct dmae_command *dmae = &bp->stats_dmae;
3373 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3376 if (!bp->func_stx) {
3377 BNX2X_ERR("BUG!\n");
3381 bp->executer_idx = 0;
3382 memset(dmae, 0, sizeof(struct dmae_command));
3384 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3385 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3386 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3388 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3390 DMAE_CMD_ENDIANITY_DW_SWAP |
3392 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3393 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3394 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3395 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3396 dmae->dst_addr_lo = bp->func_stx >> 2;
3397 dmae->dst_addr_hi = 0;
3398 dmae->len = sizeof(struct host_func_stats) >> 2;
3399 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3400 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3401 dmae->comp_val = DMAE_COMP_VAL;
3406 static void bnx2x_stats_start(struct bnx2x *bp)
3409 bnx2x_port_stats_init(bp);
3411 else if (bp->func_stx)
3412 bnx2x_func_stats_init(bp);
3414 bnx2x_hw_stats_post(bp);
3415 bnx2x_storm_stats_post(bp);
3418 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3420 bnx2x_stats_comp(bp);
3421 bnx2x_stats_pmf_update(bp);
3422 bnx2x_stats_start(bp);
3425 static void bnx2x_stats_restart(struct bnx2x *bp)
3427 bnx2x_stats_comp(bp);
3428 bnx2x_stats_start(bp);
3431 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3433 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3434 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3435 struct regpair diff;
3437 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3438 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3439 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3440 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3441 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3442 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3443 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3444 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3445 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3446 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3447 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3448 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3449 UPDATE_STAT64(tx_stat_gt127,
3450 tx_stat_etherstatspkts65octetsto127octets);
3451 UPDATE_STAT64(tx_stat_gt255,
3452 tx_stat_etherstatspkts128octetsto255octets);
3453 UPDATE_STAT64(tx_stat_gt511,
3454 tx_stat_etherstatspkts256octetsto511octets);
3455 UPDATE_STAT64(tx_stat_gt1023,
3456 tx_stat_etherstatspkts512octetsto1023octets);
3457 UPDATE_STAT64(tx_stat_gt1518,
3458 tx_stat_etherstatspkts1024octetsto1522octets);
3459 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3460 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3461 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3462 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3463 UPDATE_STAT64(tx_stat_gterr,
3464 tx_stat_dot3statsinternalmactransmiterrors);
3465 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3468 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3470 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3471 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3473 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3474 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3475 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3476 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3477 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3478 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3479 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3480 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3481 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3482 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3483 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3484 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3485 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3486 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3487 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3488 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3489 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3490 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3491 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3492 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3493 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3494 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3495 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3496 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3497 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3498 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3499 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3500 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3501 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3502 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3506 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3508 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3509 struct nig_stats *old = &(bp->port.old_nig_stats);
3510 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3511 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3512 struct regpair diff;
3514 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3515 bnx2x_bmac_stats_update(bp);
3517 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3518 bnx2x_emac_stats_update(bp);
3520 else { /* unreached */
3521 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3525 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3526 new->brb_discard - old->brb_discard);
3527 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3528 new->brb_truncate - old->brb_truncate);
3530 UPDATE_STAT64_NIG(egress_mac_pkt0,
3531 etherstatspkts1024octetsto1522octets);
3532 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3534 memcpy(old, new, sizeof(struct nig_stats));
3536 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3537 sizeof(struct mac_stx));
3538 estats->brb_drop_hi = pstats->brb_drop_hi;
3539 estats->brb_drop_lo = pstats->brb_drop_lo;
3541 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3546 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3548 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3549 int cl_id = BP_CL_ID(bp);
3550 struct tstorm_per_port_stats *tport =
3551 &stats->tstorm_common.port_statistics;
3552 struct tstorm_per_client_stats *tclient =
3553 &stats->tstorm_common.client_statistics[cl_id];
3554 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3555 struct xstorm_per_client_stats *xclient =
3556 &stats->xstorm_common.client_statistics[cl_id];
3557 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3558 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3559 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3562 /* are storm stats valid? */
3563 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3564 bp->stats_counter) {
3565 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3566 " tstorm counter (%d) != stats_counter (%d)\n",
3567 tclient->stats_counter, bp->stats_counter);
3570 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3571 bp->stats_counter) {
3572 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3573 " xstorm counter (%d) != stats_counter (%d)\n",
3574 xclient->stats_counter, bp->stats_counter);
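/* Editor's note: bp->stats_counter is incremented when the query
 * ramrod is posted, so a storm that has consumed the latest ramrod
 * echoes stats_counter == bp->stats_counter - 1; an older value means
 * the snapshot is stale and this update round is abandoned.
 */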
3578 fstats->total_bytes_received_hi =
3579 fstats->valid_bytes_received_hi =
3580 le32_to_cpu(tclient->total_rcv_bytes.hi);
3581 fstats->total_bytes_received_lo =
3582 fstats->valid_bytes_received_lo =
3583 le32_to_cpu(tclient->total_rcv_bytes.lo);
3585 estats->error_bytes_received_hi =
3586 le32_to_cpu(tclient->rcv_error_bytes.hi);
3587 estats->error_bytes_received_lo =
3588 le32_to_cpu(tclient->rcv_error_bytes.lo);
3589 ADD_64(estats->error_bytes_received_hi,
3590 estats->rx_stat_ifhcinbadoctets_hi,
3591 estats->error_bytes_received_lo,
3592 estats->rx_stat_ifhcinbadoctets_lo);
3594 ADD_64(fstats->total_bytes_received_hi,
3595 estats->error_bytes_received_hi,
3596 fstats->total_bytes_received_lo,
3597 estats->error_bytes_received_lo);
3599 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3600 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3601 total_multicast_packets_received);
3602 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3603 total_broadcast_packets_received);
3605 fstats->total_bytes_transmitted_hi =
3606 le32_to_cpu(xclient->total_sent_bytes.hi);
3607 fstats->total_bytes_transmitted_lo =
3608 le32_to_cpu(xclient->total_sent_bytes.lo);
3610 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3611 total_unicast_packets_transmitted);
3612 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3613 total_multicast_packets_transmitted);
3614 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3615 total_broadcast_packets_transmitted);
3617 memcpy(estats, &(fstats->total_bytes_received_hi),
3618 sizeof(struct host_func_stats) - 2*sizeof(u32));
3620 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3621 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3622 estats->brb_truncate_discard =
3623 le32_to_cpu(tport->brb_truncate_discard);
3624 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3626 old_tclient->rcv_unicast_bytes.hi =
3627 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3628 old_tclient->rcv_unicast_bytes.lo =
3629 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3630 old_tclient->rcv_broadcast_bytes.hi =
3631 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3632 old_tclient->rcv_broadcast_bytes.lo =
3633 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3634 old_tclient->rcv_multicast_bytes.hi =
3635 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3636 old_tclient->rcv_multicast_bytes.lo =
3637 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3638 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3640 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3641 old_tclient->packets_too_big_discard =
3642 le32_to_cpu(tclient->packets_too_big_discard);
3643 estats->no_buff_discard =
3644 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3645 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3647 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3648 old_xclient->unicast_bytes_sent.hi =
3649 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3650 old_xclient->unicast_bytes_sent.lo =
3651 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3652 old_xclient->multicast_bytes_sent.hi =
3653 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3654 old_xclient->multicast_bytes_sent.lo =
3655 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3656 old_xclient->broadcast_bytes_sent.hi =
3657 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3658 old_xclient->broadcast_bytes_sent.lo =
3659 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3661 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3666 static void bnx2x_net_stats_update(struct bnx2x *bp)
3668 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3669 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3670 struct net_device_stats *nstats = &bp->dev->stats;
3672 nstats->rx_packets =
3673 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3674 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3675 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3677 nstats->tx_packets =
3678 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3679 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3680 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3682 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3684 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3686 nstats->rx_dropped = old_tclient->checksum_discard +
3687 estats->mac_discard;
3688 nstats->tx_dropped = 0;
3691 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3693 nstats->collisions =
3694 estats->tx_stat_dot3statssinglecollisionframes_lo +
3695 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3696 estats->tx_stat_dot3statslatecollisions_lo +
3697 estats->tx_stat_dot3statsexcessivecollisions_lo;
3699 estats->jabber_packets_received =
3700 old_tclient->packets_too_big_discard +
3701 estats->rx_stat_dot3statsframestoolong_lo;
3703 nstats->rx_length_errors =
3704 estats->rx_stat_etherstatsundersizepkts_lo +
3705 estats->jabber_packets_received;
3706 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3707 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3708 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3709 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3710 nstats->rx_missed_errors = estats->xxoverflow_discard;
3712 nstats->rx_errors = nstats->rx_length_errors +
3713 nstats->rx_over_errors +
3714 nstats->rx_crc_errors +
3715 nstats->rx_frame_errors +
3716 nstats->rx_fifo_errors +
3717 nstats->rx_missed_errors;
3719 nstats->tx_aborted_errors =
3720 estats->tx_stat_dot3statslatecollisions_lo +
3721 estats->tx_stat_dot3statsexcessivecollisions_lo;
3722 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3723 nstats->tx_fifo_errors = 0;
3724 nstats->tx_heartbeat_errors = 0;
3725 nstats->tx_window_errors = 0;
3727 nstats->tx_errors = nstats->tx_aborted_errors +
3728 nstats->tx_carrier_errors;
3731 static void bnx2x_stats_update(struct bnx2x *bp)
3733 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3736 if (*stats_comp != DMAE_COMP_VAL)
3740 update = (bnx2x_hw_stats_update(bp) == 0);
3742 update |= (bnx2x_storm_stats_update(bp) == 0);
3745 bnx2x_net_stats_update(bp);
3748 if (bp->stats_pending) {
3749 bp->stats_pending++;
3750 if (bp->stats_pending == 3) {
3751 BNX2X_ERR("stats were not updated 3 times in a row\n");
3758 if (bp->msglevel & NETIF_MSG_TIMER) {
3759 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3760 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3761 struct net_device_stats *nstats = &bp->dev->stats;
3764 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3765 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3767 bnx2x_tx_avail(bp->fp),
3768 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3769 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3771 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3772 bp->fp->rx_comp_cons),
3773 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3774 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3775 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3776 estats->driver_xoff, estats->brb_drop_lo);
3777 printk(KERN_DEBUG "tstats: checksum_discard %u "
3778 "packets_too_big_discard %u no_buff_discard %u "
3779 "mac_discard %u mac_filter_discard %u "
3780 "xxovrflow_discard %u brb_truncate_discard %u "
3781 "ttl0_discard %u\n",
3782 old_tclient->checksum_discard,
3783 old_tclient->packets_too_big_discard,
3784 old_tclient->no_buff_discard, estats->mac_discard,
3785 estats->mac_filter_discard, estats->xxoverflow_discard,
3786 estats->brb_truncate_discard,
3787 old_tclient->ttl0_discard);
3789 for_each_queue(bp, i) {
3790 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3791 bnx2x_fp(bp, i, tx_pkt),
3792 bnx2x_fp(bp, i, rx_pkt),
3793 bnx2x_fp(bp, i, rx_calls));
3797 bnx2x_hw_stats_post(bp);
3798 bnx2x_storm_stats_post(bp);
3801 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3803 struct dmae_command *dmae;
3805 int loader_idx = PMF_DMAE_C(bp);
3806 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3808 bp->executer_idx = 0;
3810 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3812 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3814 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3816 DMAE_CMD_ENDIANITY_DW_SWAP |
3818 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3819 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3821 if (bp->port.port_stx) {
3823 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3825 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3827 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3828 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3829 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3830 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3831 dmae->dst_addr_hi = 0;
3832 dmae->len = sizeof(struct host_port_stats) >> 2;
3834 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3835 dmae->comp_addr_hi = 0;
3838 dmae->comp_addr_lo =
3839 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3840 dmae->comp_addr_hi =
3841 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3842 dmae->comp_val = DMAE_COMP_VAL;
3850 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3851 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3852 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3853 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3854 dmae->dst_addr_lo = bp->func_stx >> 2;
3855 dmae->dst_addr_hi = 0;
3856 dmae->len = sizeof(struct host_func_stats) >> 2;
3857 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3858 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3859 dmae->comp_val = DMAE_COMP_VAL;
3865 static void bnx2x_stats_stop(struct bnx2x *bp)
3869 bnx2x_stats_comp(bp);
3872 update = (bnx2x_hw_stats_update(bp) == 0);
3874 update |= (bnx2x_storm_stats_update(bp) == 0);
3877 bnx2x_net_stats_update(bp);
3880 bnx2x_port_stats_stop(bp);
3882 bnx2x_hw_stats_post(bp);
3883 bnx2x_stats_comp(bp);
3887 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3891 static const struct {
3892 void (*action)(struct bnx2x *bp);
3893 enum bnx2x_stats_state next_state;
3894 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3897 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3898 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3899 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3900 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3903 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3904 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3905 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3906 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3910 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3912 enum bnx2x_stats_state state = bp->stats_state;
3914 bnx2x_stats_stm[state][event].action(bp);
3915 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3917 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3918 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3919 state, event, bp->stats_state);
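/* Editor's example of one transition: in STATS_STATE_DISABLED a
 * STATS_EVENT_LINK_UP runs bnx2x_stats_start() and moves the machine
 * to STATS_STATE_ENABLED, while an UPDATE event in the same state is
 * a do-nothing and leaves it DISABLED.
 */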
3922 static void bnx2x_timer(unsigned long data)
3924 struct bnx2x *bp = (struct bnx2x *) data;
3926 if (!netif_running(bp->dev))
3929 if (atomic_read(&bp->intr_sem) != 0)
3933 struct bnx2x_fastpath *fp = &bp->fp[0];
3936 bnx2x_tx_int(fp, 1000);
3937 rc = bnx2x_rx_int(fp, 1000);
3940 if (!BP_NOMCP(bp)) {
3941 int func = BP_FUNC(bp);
3945 ++bp->fw_drv_pulse_wr_seq;
3946 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3947 /* TBD - add SYSTEM_TIME */
3948 drv_pulse = bp->fw_drv_pulse_wr_seq;
3949 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3951 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3952 MCP_PULSE_SEQ_MASK);
3953 /* The delta between driver pulse and mcp response
3954 * should be 1 (before mcp response) or 0 (after mcp response)
3956 if ((drv_pulse != mcp_pulse) &&
3957 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3958 /* someone lost a heartbeat... */
3959 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3960 drv_pulse, mcp_pulse);
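/* Editor's example: if the driver just wrote drv_pulse = 0x10, a
 * healthy MCP reads back mcp_pulse = 0x10 (already responded) or
 * 0x0f (not yet responded); any other value, e.g. 0x0c, means beats
 * were missed and the error above fires.
 */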
3964 if ((bp->state == BNX2X_STATE_OPEN) ||
3965 (bp->state == BNX2X_STATE_DISABLED))
3966 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3969 mod_timer(&bp->timer, jiffies + bp->current_interval);
3972 /* end of Statistics */
3977 * nic init service functions
3980 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3982 int port = BP_PORT(bp);
3984 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3985 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3986 sizeof(struct ustorm_def_status_block)/4);
3987 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3988 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3989 sizeof(struct cstorm_def_status_block)/4);
3992 static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
3993 struct host_status_block *sb, dma_addr_t mapping)
3995 int port = BP_PORT(bp);
3996 int func = BP_FUNC(bp);
4001 section = ((u64)mapping) + offsetof(struct host_status_block,
4003 sb->u_status_block.status_block_id = sb_id;
4005 REG_WR(bp, BAR_USTRORM_INTMEM +
4006 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4007 REG_WR(bp, BAR_USTRORM_INTMEM +
4008 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4010 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4011 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4013 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4014 REG_WR16(bp, BAR_USTRORM_INTMEM +
4015 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4018 section = ((u64)mapping) + offsetof(struct host_status_block,
4020 sb->c_status_block.status_block_id = sb_id;
4022 REG_WR(bp, BAR_CSTRORM_INTMEM +
4023 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4024 REG_WR(bp, BAR_CSTRORM_INTMEM +
4025 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4027 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4028 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4030 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4031 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4032 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4034 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4037 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4039 int func = BP_FUNC(bp);
4041 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4042 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4043 sizeof(struct ustorm_def_status_block)/4);
4044 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4045 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4046 sizeof(struct cstorm_def_status_block)/4);
4047 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4048 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4049 sizeof(struct xstorm_def_status_block)/4);
4050 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4051 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052 sizeof(struct tstorm_def_status_block)/4);
4055 static void bnx2x_init_def_sb(struct bnx2x *bp,
4056 struct host_def_status_block *def_sb,
4057 dma_addr_t mapping, int sb_id)
4059 int port = BP_PORT(bp);
4060 int func = BP_FUNC(bp);
4061 int index, val, reg_offset;
4065 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4066 atten_status_block);
4067 def_sb->atten_status_block.status_block_id = sb_id;
4069 bp->def_att_idx = 0;
4072 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4073 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4075 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4076 bp->attn_group[index].sig[0] = REG_RD(bp,
4077 reg_offset + 0x10*index);
4078 bp->attn_group[index].sig[1] = REG_RD(bp,
4079 reg_offset + 0x4 + 0x10*index);
4080 bp->attn_group[index].sig[2] = REG_RD(bp,
4081 reg_offset + 0x8 + 0x10*index);
4082 bp->attn_group[index].sig[3] = REG_RD(bp,
4083 reg_offset + 0xc + 0x10*index);
4086 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4087 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4089 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4090 HC_REG_ATTN_MSG0_ADDR_L);
4092 REG_WR(bp, reg_offset, U64_LO(section));
4093 REG_WR(bp, reg_offset + 4, U64_HI(section));
4095 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4097 val = REG_RD(bp, reg_offset);
4099 REG_WR(bp, reg_offset, val);
4102 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4103 u_def_status_block);
4104 def_sb->u_def_status_block.status_block_id = sb_id;
4108 REG_WR(bp, BAR_USTRORM_INTMEM +
4109 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4110 REG_WR(bp, BAR_USTRORM_INTMEM +
4111 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4113 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4114 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4115 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4118 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4119 REG_WR16(bp, BAR_USTRORM_INTMEM +
4120 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4123 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4124 c_def_status_block);
4125 def_sb->c_def_status_block.status_block_id = sb_id;
4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
4130 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4131 REG_WR(bp, BAR_CSTRORM_INTMEM +
4132 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4135 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4136 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4139 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4140 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4141 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4144 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4145 t_def_status_block);
4146 def_sb->t_def_status_block.status_block_id = sb_id;
4150 REG_WR(bp, BAR_TSTRORM_INTMEM +
4151 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4152 REG_WR(bp, BAR_TSTRORM_INTMEM +
4153 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4155 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4156 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4157 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4160 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4161 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4162 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4165 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4166 x_def_status_block);
4167 def_sb->x_def_status_block.status_block_id = sb_id;
4171 REG_WR(bp, BAR_XSTRORM_INTMEM +
4172 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4173 REG_WR(bp, BAR_XSTRORM_INTMEM +
4174 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4175 U64_HI(section));
4176 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4177 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4178 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4179 BNX2X_BTR);
4181 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4182 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4183 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4185 bp->stats_pending = 0;
4186 bp->set_mac_pending = 0;
4188 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4189 }
4191 static void bnx2x_update_coalesce(struct bnx2x *bp)
4192 {
4193 int port = BP_PORT(bp);
4194 int i;
4196 for_each_queue(bp, i) {
4197 int sb_id = bp->fp[i].sb_id;
4199 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4200 REG_WR8(bp, BAR_USTRORM_INTMEM +
4201 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4202 HC_INDEX_U_ETH_RX_CQ_CONS),
4203 bp->rx_ticks/12);
4204 REG_WR16(bp, BAR_USTRORM_INTMEM +
4205 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4206 HC_INDEX_U_ETH_RX_CQ_CONS),
4207 bp->rx_ticks ? 0 : 1);
4209 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4210 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4211 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4212 HC_INDEX_C_ETH_TX_CQ_CONS),
4213 bp->tx_ticks/12);
4214 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4215 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4216 HC_INDEX_C_ETH_TX_CQ_CONS),
4217 bp->tx_ticks ? 0 : 1);
4218 }
4219 }
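/*
 * Note on the coalescing math above: rx_ticks/tx_ticks are in
 * microseconds and the status-block timeout field is programmed as
 * ticks/12, which suggests a 12us hardware tick. A value of 0 disables
 * coalescing for that index, in which case HC_DISABLE is set instead:
 *
 *	bp->rx_ticks = 25;	REG_WR8(..., 25/12);	timeout ~24us
 *	bp->rx_ticks = 0;	REG_WR16(..., 1);	index disabled
 *
 * (Illustrative only; the 12us unit is inferred from this code.)
 */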
4221 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4222 struct bnx2x_fastpath *fp, int last)
4223 {
4224 int i;
4226 for (i = 0; i < last; i++) {
4227 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4228 struct sk_buff *skb = rx_buf->skb;
4230 if (skb == NULL) {
4231 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4232 continue;
4233 }
4235 if (fp->tpa_state[i] == BNX2X_TPA_START)
4236 pci_unmap_single(bp->pdev,
4237 pci_unmap_addr(rx_buf, mapping),
4238 bp->rx_buf_use_size,
4239 PCI_DMA_FROMDEVICE);
4241 dev_kfree_skb(skb);
4242 rx_buf->skb = NULL;
4243 }
4244 }
4246 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4247 {
4248 int func = BP_FUNC(bp);
4249 u16 ring_prod, cqe_ring_prod = 0;
4250 int i, j;
4252 bp->rx_buf_use_size = bp->dev->mtu;
4253 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4254 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4256 if (bp->flags & TPA_ENABLE_FLAG) {
4257 DP(NETIF_MSG_IFUP,
4258 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4259 bp->rx_buf_use_size, bp->rx_buf_size,
4260 bp->dev->mtu + ETH_OVREHEAD);
4262 for_each_queue(bp, j) {
4263 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4264 struct bnx2x_fastpath *fp = &bp->fp[j];
4266 fp->tpa_pool[i].skb =
4267 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4268 if (!fp->tpa_pool[i].skb) {
4269 BNX2X_ERR("Failed to allocate TPA "
4270 "skb pool for queue[%d] - "
4271 "disabling TPA on this "
4272 "queue!\n", j);
4273 bnx2x_free_tpa_pool(bp, fp, i);
4274 fp->disable_tpa = 1;
4275 break;
4276 }
4277 pci_unmap_addr_set((struct sw_rx_bd *)
4278 &fp->tpa_pool[i],
4279 mapping, 0);
4280 fp->tpa_state[i] = BNX2X_TPA_STOP;
4281 }
4282 }
4283 }
4285 for_each_queue(bp, j) {
4286 struct bnx2x_fastpath *fp = &bp->fp[j];
4289 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4290 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4292 /* "next page" elements initialization */
4294 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4295 struct eth_rx_sge *sge;
4297 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4298 sge->addr_hi =
4299 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4300 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4301 sge->addr_lo =
4302 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4303 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4304 }
4306 bnx2x_init_sge_ring_bit_mask(fp);
4309 for (i = 1; i <= NUM_RX_RINGS; i++) {
4310 struct eth_rx_bd *rx_bd;
4312 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4313 rx_bd->addr_hi =
4314 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4315 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4316 rx_bd->addr_lo =
4317 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4318 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4319 }
4322 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4323 struct eth_rx_cqe_next_page *nextpg;
4325 nextpg = (struct eth_rx_cqe_next_page *)
4326 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4327 nextpg->addr_hi =
4328 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4329 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4330 nextpg->addr_lo =
4331 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4332 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4333 }
4335 /* Allocate SGEs and initialize the ring elements */
4336 for (i = 0, ring_prod = 0;
4337 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4339 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4340 BNX2X_ERR("was only able to allocate "
4341 "%d rx sges\n", i);
4342 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4343 /* Cleanup already allocated elements */
4344 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4345 bnx2x_free_tpa_pool(bp, fp,
4346 ETH_MAX_AGGREGATION_QUEUES_E1H);
4347 fp->disable_tpa = 1;
4348 ring_prod = 0;
4349 break;
4350 }
4351 ring_prod = NEXT_SGE_IDX(ring_prod);
4352 }
4353 fp->rx_sge_prod = ring_prod;
4355 /* Allocate BDs and initialize BD ring */
4356 fp->rx_comp_cons = 0;
4357 cqe_ring_prod = ring_prod = 0;
4358 for (i = 0; i < bp->rx_ring_size; i++) {
4359 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4360 BNX2X_ERR("was only able to allocate "
4362 bp->eth_stats.rx_skb_alloc_failed++;
4365 ring_prod = NEXT_RX_IDX(ring_prod);
4366 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4367 WARN_ON(ring_prod <= i);
4368 }
4370 fp->rx_bd_prod = ring_prod;
4371 /* must not have more available CQEs than BDs */
4372 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4373 cqe_ring_prod);
4374 fp->rx_pkt = fp->rx_calls = 0;
4376 /* Warning!
4377 * this will generate an interrupt (to the TSTORM)
4378 * must only be done after chip is initialized
4379 */
4380 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4381 fp->rx_sge_prod);
4383 if (j != 0)
4384 continue;
4385 REG_WR(bp, BAR_USTRORM_INTMEM +
4386 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4387 U64_LO(fp->rx_comp_mapping));
4388 REG_WR(bp, BAR_USTRORM_INTMEM +
4389 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4390 U64_HI(fp->rx_comp_mapping));
4391 }
4392 }
4394 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4395 {
4396 int i, j;
4398 for_each_queue(bp, j) {
4399 struct bnx2x_fastpath *fp = &bp->fp[j];
4401 for (i = 1; i <= NUM_TX_RINGS; i++) {
4402 struct eth_tx_bd *tx_bd =
4403 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4405 tx_bd->addr_hi =
4406 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4407 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4408 tx_bd->addr_lo =
4409 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4410 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4411 }
4413 fp->tx_pkt_prod = 0;
4414 fp->tx_pkt_cons = 0;
4415 fp->tx_bd_prod = 0;
4416 fp->tx_bd_cons = 0;
4417 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4418 fp->tx_pkt = 0;
4419 }
4420 }
4422 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4423 {
4424 int func = BP_FUNC(bp);
4426 spin_lock_init(&bp->spq_lock);
4426 spin_lock_init(&bp->spq_lock);
4428 bp->spq_left = MAX_SPQ_PENDING;
4429 bp->spq_prod_idx = 0;
4430 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4431 bp->spq_prod_bd = bp->spq;
4432 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4434 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4435 U64_LO(bp->spq_mapping));
4436 REG_WR(bp,
4437 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4438 U64_HI(bp->spq_mapping));
4440 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4441 bp->spq_prod_idx);
4442 }
4444 static void bnx2x_init_context(struct bnx2x *bp)
4445 {
4446 int i;
4448 for_each_queue(bp, i) {
4449 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4450 struct bnx2x_fastpath *fp = &bp->fp[i];
4451 u8 sb_id = FP_SB_ID(fp);
4453 context->xstorm_st_context.tx_bd_page_base_hi =
4454 U64_HI(fp->tx_desc_mapping);
4455 context->xstorm_st_context.tx_bd_page_base_lo =
4456 U64_LO(fp->tx_desc_mapping);
4457 context->xstorm_st_context.db_data_addr_hi =
4458 U64_HI(fp->tx_prods_mapping);
4459 context->xstorm_st_context.db_data_addr_lo =
4460 U64_LO(fp->tx_prods_mapping);
4461 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4462 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4464 context->ustorm_st_context.common.sb_index_numbers =
4465 BNX2X_RX_SB_INDEX_NUM;
4466 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4467 context->ustorm_st_context.common.status_block_id = sb_id;
4468 context->ustorm_st_context.common.flags =
4469 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4470 context->ustorm_st_context.common.mc_alignment_size = 64;
4471 context->ustorm_st_context.common.bd_buff_size =
4472 bp->rx_buf_use_size;
4473 context->ustorm_st_context.common.bd_page_base_hi =
4474 U64_HI(fp->rx_desc_mapping);
4475 context->ustorm_st_context.common.bd_page_base_lo =
4476 U64_LO(fp->rx_desc_mapping);
4477 if (!fp->disable_tpa) {
4478 context->ustorm_st_context.common.flags |=
4479 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4480 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4481 context->ustorm_st_context.common.sge_buff_size =
4482 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4483 context->ustorm_st_context.common.sge_page_base_hi =
4484 U64_HI(fp->rx_sge_mapping);
4485 context->ustorm_st_context.common.sge_page_base_lo =
4486 U64_LO(fp->rx_sge_mapping);
4487 }
4489 context->cstorm_st_context.sb_index_number =
4490 HC_INDEX_C_ETH_TX_CQ_CONS;
4491 context->cstorm_st_context.status_block_id = sb_id;
4493 context->xstorm_ag_context.cdu_reserved =
4494 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4495 CDU_REGION_NUMBER_XCM_AG,
4496 ETH_CONNECTION_TYPE);
4497 context->ustorm_ag_context.cdu_usage =
4498 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4499 CDU_REGION_NUMBER_UCM_AG,
4500 ETH_CONNECTION_TYPE);
4501 }
4502 }
4504 static void bnx2x_init_ind_table(struct bnx2x *bp)
4505 {
4506 int port = BP_PORT(bp);
4507 int i;
4509 if (!is_multi(bp))
4510 return;
4512 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4513 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4514 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4515 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4516 i % bp->num_queues);
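/*
 * The loop above spreads RSS hash results over the active queues
 * round-robin; e.g. with bp->num_queues == 4 the table reads
 * 0,1,2,3,0,1,2,3,... for all TSTORM_INDIRECTION_TABLE_SIZE entries.
 */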
4518 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4519 }
4521 static void bnx2x_set_client_config(struct bnx2x *bp)
4522 {
4523 struct tstorm_eth_client_config tstorm_client = {0};
4524 int port = BP_PORT(bp);
4525 int i;
4527 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4528 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4529 tstorm_client.config_flags =
4530 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4531 #ifdef BCM_VLAN
4532 if (bp->rx_mode && bp->vlgrp) {
4533 tstorm_client.config_flags |=
4534 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4535 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4536 }
4537 #endif
4539 if (bp->flags & TPA_ENABLE_FLAG) {
4540 tstorm_client.max_sges_for_packet =
4541 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4542 tstorm_client.max_sges_for_packet =
4543 ((tstorm_client.max_sges_for_packet +
4544 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4545 PAGES_PER_SGE_SHIFT;
4547 tstorm_client.config_flags |=
4548 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4549 }
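/*
 * Worked example for the max_sges_for_packet math above (illustrative,
 * assuming 4K pages and PAGES_PER_SGE == 1): mtu ~9000 is page-aligned
 * to 12288, i.e. 3 pages, then rounded up to a whole number of SGE
 * entries, so one aggregated packet may consume up to 3 SGEs.
 */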
4551 for_each_queue(bp, i) {
4552 REG_WR(bp, BAR_TSTRORM_INTMEM +
4553 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4554 ((u32 *)&tstorm_client)[0]);
4555 REG_WR(bp, BAR_TSTRORM_INTMEM +
4556 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4557 ((u32 *)&tstorm_client)[1]);
4558 }
4560 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4561 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4564 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4565 {
4566 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4567 int mode = bp->rx_mode;
4568 int mask = (1 << BP_L_ID(bp));
4569 int func = BP_FUNC(bp);
4570 int i;
4572 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4574 switch (mode) {
4575 case BNX2X_RX_MODE_NONE: /* no Rx */
4576 tstorm_mac_filter.ucast_drop_all = mask;
4577 tstorm_mac_filter.mcast_drop_all = mask;
4578 tstorm_mac_filter.bcast_drop_all = mask;
4579 break;
4580 case BNX2X_RX_MODE_NORMAL:
4581 tstorm_mac_filter.bcast_accept_all = mask;
4582 break;
4583 case BNX2X_RX_MODE_ALLMULTI:
4584 tstorm_mac_filter.mcast_accept_all = mask;
4585 tstorm_mac_filter.bcast_accept_all = mask;
4586 break;
4587 case BNX2X_RX_MODE_PROMISC:
4588 tstorm_mac_filter.ucast_accept_all = mask;
4589 tstorm_mac_filter.mcast_accept_all = mask;
4590 tstorm_mac_filter.bcast_accept_all = mask;
4591 break;
4592 default:
4593 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4594 break;
4595 }
4597 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4598 REG_WR(bp, BAR_TSTRORM_INTMEM +
4599 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4600 ((u32 *)&tstorm_mac_filter)[i]);
4602 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4603 ((u32 *)&tstorm_mac_filter)[i]); */
4604 }
4606 if (mode != BNX2X_RX_MODE_NONE)
4607 bnx2x_set_client_config(bp);
4608 }
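/*
 * mask above is one bit per client: function N contributes bit
 * (1 << BP_L_ID(bp)), so e.g. logical client 2 toggles 0x04 in each
 * drop_all/accept_all field without disturbing the other functions
 * sharing the chip.
 */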
4610 static void bnx2x_init_internal_common(struct bnx2x *bp)
4611 {
4612 int i;
4614 /* Zero this manually as its initialization is
4615 currently missing in the initTool */
4616 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4617 REG_WR(bp, BAR_USTRORM_INTMEM +
4618 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4619 }
4621 static void bnx2x_init_internal_port(struct bnx2x *bp)
4622 {
4623 int port = BP_PORT(bp);
4625 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4626 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4627 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4628 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4629 }
4631 static void bnx2x_init_internal_func(struct bnx2x *bp)
4632 {
4633 struct tstorm_eth_function_common_config tstorm_config = {0};
4634 struct stats_indication_flags stats_flags = {0};
4635 int port = BP_PORT(bp);
4636 int func = BP_FUNC(bp);
4637 int i;
4638 u16 max_agg_size;
4640 if (is_multi(bp)) {
4641 tstorm_config.config_flags = MULTI_FLAGS;
4642 tstorm_config.rss_result_mask = MULTI_MASK;
4643 }
4645 tstorm_config.leading_client_id = BP_L_ID(bp);
4647 REG_WR(bp, BAR_TSTRORM_INTMEM +
4648 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4649 (*(u32 *)&tstorm_config));
4651 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4652 bnx2x_set_storm_rx_mode(bp);
4654 /* reset xstorm per client statistics */
4655 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4656 REG_WR(bp, BAR_XSTRORM_INTMEM +
4657 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4658 i*4, 0);
4659 }
4660 /* reset tstorm per client statistics */
4661 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4662 REG_WR(bp, BAR_TSTRORM_INTMEM +
4663 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4664 i*4, 0);
4665 }
4667 /* Init statistics related context */
4668 stats_flags.collect_eth = 1;
4670 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4671 ((u32 *)&stats_flags)[0]);
4672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4673 ((u32 *)&stats_flags)[1]);
4675 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4676 ((u32 *)&stats_flags)[0]);
4677 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4678 ((u32 *)&stats_flags)[1]);
4680 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4681 ((u32 *)&stats_flags)[0]);
4682 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4683 ((u32 *)&stats_flags)[1]);
4685 REG_WR(bp, BAR_XSTRORM_INTMEM +
4686 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4687 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4688 REG_WR(bp, BAR_XSTRORM_INTMEM +
4689 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4690 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4692 REG_WR(bp, BAR_TSTRORM_INTMEM +
4693 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4694 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4695 REG_WR(bp, BAR_TSTRORM_INTMEM +
4696 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4697 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4699 if (CHIP_IS_E1H(bp)) {
4700 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4701 IS_E1HMF(bp));
4702 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4703 IS_E1HMF(bp));
4704 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4705 IS_E1HMF(bp));
4706 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4707 IS_E1HMF(bp));
4709 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4710 bp->e1hov);
4711 }
4713 /* Init CQ ring mapping and aggregation size */
4714 max_agg_size = min((u32)(bp->rx_buf_use_size +
4715 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4716 (u32)0xffff);
4717 for_each_queue(bp, i) {
4718 struct bnx2x_fastpath *fp = &bp->fp[i];
4720 REG_WR(bp, BAR_USTRORM_INTMEM +
4721 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4722 U64_LO(fp->rx_comp_mapping));
4723 REG_WR(bp, BAR_USTRORM_INTMEM +
4724 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4725 U64_HI(fp->rx_comp_mapping));
4727 REG_WR16(bp, BAR_USTRORM_INTMEM +
4728 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4729 max_agg_size);
4730 }
4731 }
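/*
 * max_agg_size caps a TPA aggregation at whichever is smaller: one
 * buffer plus 8 SGE pages of payload, or 0xffff (the field is 16 bits
 * wide). With 4K pages that is min(rx_buf_use_size + 32K, 65535) bytes.
 */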
4733 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4734 {
4735 switch (load_code) {
4736 case FW_MSG_CODE_DRV_LOAD_COMMON:
4737 bnx2x_init_internal_common(bp);
4738 /* no break */
4740 case FW_MSG_CODE_DRV_LOAD_PORT:
4741 bnx2x_init_internal_port(bp);
4742 /* no break */
4744 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4745 bnx2x_init_internal_func(bp);
4746 break;
4748 default:
4749 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4750 break;
4751 }
4752 }
4754 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4755 {
4756 int i;
4758 for_each_queue(bp, i) {
4759 struct bnx2x_fastpath *fp = &bp->fp[i];
4762 fp->state = BNX2X_FP_STATE_CLOSED;
4763 fp->index = i;
4764 fp->cl_id = BP_L_ID(bp) + i;
4765 fp->sb_id = fp->cl_id;
4766 DP(NETIF_MSG_IFUP,
4767 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4768 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4769 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4770 fp->status_blk_mapping);
4771 }
4773 bnx2x_init_def_sb(bp, bp->def_status_blk,
4774 bp->def_status_blk_mapping, DEF_SB_ID);
4775 bnx2x_update_coalesce(bp);
4776 bnx2x_init_rx_rings(bp);
4777 bnx2x_init_tx_ring(bp);
4778 bnx2x_init_sp_ring(bp);
4779 bnx2x_init_context(bp);
4780 bnx2x_init_internal(bp, load_code);
4781 bnx2x_init_ind_table(bp);
4782 bnx2x_int_enable(bp);
4783 }
4785 /* end of nic init */
4787 /*
4788 * gzip service functions
4789 */
4791 static int bnx2x_gunzip_init(struct bnx2x *bp)
4792 {
4793 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4794 &bp->gunzip_mapping);
4795 if (bp->gunzip_buf == NULL)
4796 goto gunzip_nomem1;
4798 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4799 if (bp->strm == NULL)
4800 goto gunzip_nomem2;
4802 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4803 GFP_KERNEL);
4804 if (bp->strm->workspace == NULL)
4805 goto gunzip_nomem3;
4807 return 0;
4809 gunzip_nomem3:
4810 kfree(bp->strm);
4811 bp->strm = NULL;
4813 gunzip_nomem2:
4814 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4815 bp->gunzip_mapping);
4816 bp->gunzip_buf = NULL;
4818 gunzip_nomem1:
4819 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4820 " decompression\n", bp->dev->name);
4821 return -ENOMEM;
4822 }
4824 static void bnx2x_gunzip_end(struct bnx2x *bp)
4825 {
4826 kfree(bp->strm->workspace);
4828 kfree(bp->strm);
4829 bp->strm = NULL;
4831 if (bp->gunzip_buf) {
4832 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4833 bp->gunzip_mapping);
4834 bp->gunzip_buf = NULL;
4835 }
4836 }
4838 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4839 {
4840 int n, rc;
4842 /* check gzip header */
4843 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4844 return -EINVAL;
4846 n = 10;
4848 #define FNAME 0x8
4850 if (zbuf[3] & FNAME)
4851 while ((zbuf[n++] != 0) && (n < len));
4853 bp->strm->next_in = zbuf + n;
4854 bp->strm->avail_in = len - n;
4855 bp->strm->next_out = bp->gunzip_buf;
4856 bp->strm->avail_out = FW_BUF_SIZE;
4858 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4859 if (rc != Z_OK)
4860 return rc;
4862 rc = zlib_inflate(bp->strm, Z_FINISH);
4863 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4864 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4865 bp->dev->name, bp->strm->msg);
4867 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4868 if (bp->gunzip_outlen & 0x3)
4869 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4870 " gunzip_outlen (%d) not aligned\n",
4871 bp->dev->name, bp->gunzip_outlen);
4872 bp->gunzip_outlen >>= 2;
4874 zlib_inflateEnd(bp->strm);
4876 if (rc == Z_STREAM_END)
4877 return 0;
4879 return rc;
4880 }
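/*
 * Header-skip walkthrough (illustrative, per RFC 1952): for a member
 * that starts 1f 8b 08 08 <mtime:4> <xfl> <os> "fw.bin\0" <deflate...>,
 * FLG bit 3 (FNAME) is set, so the loop above advances n from the fixed
 * 10-byte header past the NUL-terminated name before inflating the raw
 * deflate data (-MAX_WBITS selects a headerless stream).
 */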
4882 /* nic load/unload */
4884 /****************************************************************************
4885 * General service functions
4886 ****************************************************************************/
4888 /* send a NIG loopback debug packet */
4889 static void bnx2x_lb_pckt(struct bnx2x *bp)
4890 {
4891 u32 wb_write[3];
4893 /* Ethernet source and destination addresses */
4894 wb_write[0] = 0x55555555;
4895 wb_write[1] = 0x55555555;
4896 wb_write[2] = 0x20; /* SOP */
4897 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4899 /* NON-IP protocol */
4900 wb_write[0] = 0x09000000;
4901 wb_write[1] = 0x55555555;
4902 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4903 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4904 }
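/*
 * The third word written to NIG_REG_DEBUG_PACKET_LB is a control word:
 * 0x20 marks start-of-packet and 0x10 end-of-packet (with eop_bvalid
 * cleared), so the two writes above emit one minimal 0x10-byte debug
 * frame, which the memory test below counts via NIG/PRS statistics.
 */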
4906 /* some of the internal memories
4907 * are not directly readable from the driver
4908 * to test them we send debug packets
4909 */
4910 static int bnx2x_int_mem_test(struct bnx2x *bp)
4911 {
4912 int factor;
4913 int count, i;
4914 u32 val = 0;
4916 if (CHIP_REV_IS_FPGA(bp))
4917 factor = 120;
4918 else if (CHIP_REV_IS_EMUL(bp))
4919 factor = 200;
4920 else
4921 factor = 1;
4923 DP(NETIF_MSG_HW, "start part1\n");
4925 /* Disable inputs of parser neighbor blocks */
4926 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4927 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4928 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4929 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4931 /* Write 0 to parser credits for CFC search request */
4932 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4934 /* send Ethernet packet */
4935 bnx2x_lb_pckt(bp);
4937 /* TODO do I reset NIG statistic? */
4938 /* Wait until NIG register shows 1 packet of size 0x10 */
4939 count = 1000 * factor;
4940 while (count) {
4942 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4943 val = *bnx2x_sp(bp, wb_data[0]);
4944 if (val == 0x10)
4945 break;
4947 msleep(10);
4948 count--;
4949 }
4950 if (val != 0x10) {
4951 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4952 return -1;
4953 }
4955 /* Wait until PRS register shows 1 packet */
4956 count = 1000 * factor;
4957 while (count) {
4958 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4959 if (val == 1)
4960 break;
4962 msleep(10);
4963 count--;
4964 }
4965 if (val != 0x1) {
4966 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4967 return -2;
4968 }
4970 /* Reset and init BRB, PRS */
4971 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4972 msleep(50);
4973 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4974 msleep(50);
4975 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4976 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4978 DP(NETIF_MSG_HW, "part2\n");
4980 /* Disable inputs of parser neighbor blocks */
4981 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4982 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4983 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4984 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4986 /* Write 0 to parser credits for CFC search request */
4987 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4989 /* send 10 Ethernet packets */
4990 for (i = 0; i < 10; i++)
4991 bnx2x_lb_pckt(bp);
4993 /* Wait until NIG register shows 10 + 1
4994 packets of size 11*0x10 = 0xb0 */
4995 count = 1000 * factor;
4996 while (count) {
4998 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4999 val = *bnx2x_sp(bp, wb_data[0]);
5000 if (val == 0xb0)
5001 break;
5003 msleep(10);
5004 count--;
5005 }
5006 if (val != 0xb0) {
5007 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5008 return -3;
5009 }
5011 /* Wait until PRS register shows 2 packets */
5012 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5013 if (val != 2)
5014 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5016 /* Write 1 to parser credits for CFC search request */
5017 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5019 /* Wait until PRS register shows 3 packets */
5020 msleep(10 * factor);
5021 /* Wait until NIG register shows 1 packet of size 0x10 */
5022 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5023 if (val != 3)
5024 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5026 /* clear NIG EOP FIFO */
5027 for (i = 0; i < 11; i++)
5028 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5029 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5030 if (val != 1) {
5031 BNX2X_ERR("clear of NIG failed\n");
5032 return -4;
5033 }
5035 /* Reset and init BRB, PRS, NIG */
5036 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5037 msleep(50);
5038 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5039 msleep(50);
5040 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5041 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5042 #ifndef BCM_ISCSI
5043 /* set NIC mode */
5044 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5045 #endif
5047 /* Enable inputs of parser neighbor blocks */
5048 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5049 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5050 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5051 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5053 DP(NETIF_MSG_HW, "done\n");
5058 static void enable_blocks_attention(struct bnx2x *bp)
5059 {
5060 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5061 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5062 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5063 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5064 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5065 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5066 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5067 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5068 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5069 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5070 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5071 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5072 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5073 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5074 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5075 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5076 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5077 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5078 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5079 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5080 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5081 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5082 if (CHIP_REV_IS_FPGA(bp))
5083 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5085 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5086 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5087 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5088 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5089 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5090 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5091 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5092 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5093 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5094 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
5095 }
5098 static int bnx2x_init_common(struct bnx2x *bp)
5099 {
5100 u32 val, i;
5102 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5104 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5105 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5107 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5108 if (CHIP_IS_E1H(bp))
5109 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5111 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5112 msleep(30);
5113 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5115 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5116 if (CHIP_IS_E1(bp)) {
5117 /* enable HW interrupt from PXP on USDM overflow
5118 bit 16 on INT_MASK_0 */
5119 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5120 }
5122 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5124 #ifdef __BIG_ENDIAN
5126 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5127 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5128 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5129 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5130 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5131 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5133 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5134 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5135 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5136 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5137 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5138 #endif
5142 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5145 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5146 #ifdef BCM_ISCSI
5147 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5148 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5149 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5150 #endif
5152 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5153 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5155 /* let the HW do its magic ... */
5156 msleep(100);
5157 /* finish PXP init */
5158 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5159 if (val != 1) {
5160 BNX2X_ERR("PXP2 CFG failed\n");
5161 return -EBUSY;
5162 }
5163 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5164 if (val != 1) {
5165 BNX2X_ERR("PXP2 RD_INIT failed\n");
5166 return -EBUSY;
5167 }
5169 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5170 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5172 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5174 /* clean the DMAE memory */
5175 bp->dmae_ready = 1;
5176 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5178 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5179 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5180 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5181 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5183 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5184 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5185 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5186 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5188 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5189 /* soft reset pulse */
5190 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5191 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5193 #ifdef BCM_ISCSI
5194 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5195 #endif
5197 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5198 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5199 if (!CHIP_REV_IS_SLOW(bp)) {
5200 /* enable hw interrupt from doorbell Q */
5201 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5202 }
5204 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5205 if (CHIP_REV_IS_SLOW(bp)) {
5206 /* fix for emulation and FPGA for no pause */
5207 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5208 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5209 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5210 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5213 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5214 if (CHIP_IS_E1H(bp))
5215 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5217 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5218 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5219 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5220 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5222 if (CHIP_IS_E1H(bp)) {
5223 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5224 STORM_INTMEM_SIZE_E1H/2);
5225 bnx2x_init_fill(bp,
5226 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5227 0, STORM_INTMEM_SIZE_E1H/2);
5228 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5229 STORM_INTMEM_SIZE_E1H/2);
5230 bnx2x_init_fill(bp,
5231 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5232 0, STORM_INTMEM_SIZE_E1H/2);
5233 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5234 STORM_INTMEM_SIZE_E1H/2);
5235 bnx2x_init_fill(bp,
5236 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5237 0, STORM_INTMEM_SIZE_E1H/2);
5238 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1H/2);
5240 bnx2x_init_fill(bp,
5241 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5242 0, STORM_INTMEM_SIZE_E1H/2);
5243 } else { /* E1 */
5244 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1);
5246 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5247 STORM_INTMEM_SIZE_E1);
5248 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5249 STORM_INTMEM_SIZE_E1);
5250 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1);
5252 }
5254 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5255 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5256 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5257 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5259 /* sync semi rtc */
5260 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5261 0x80000000);
5262 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5263 0x80000000);
5265 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5266 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5267 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5269 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5270 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5271 REG_WR(bp, i, 0xc0cac01a);
5272 /* TODO: replace with something meaningful */
5273 }
5274 if (CHIP_IS_E1H(bp))
5275 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5276 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5278 if (sizeof(union cdu_context) != 1024)
5279 /* we currently assume that a context is 1024 bytes */
5280 printk(KERN_ALERT PFX "please adjust the size of"
5281 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5283 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5284 val = (4 << 24) + (0 << 12) + 1024;
5285 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5286 if (CHIP_IS_E1(bp)) {
5287 /* !!! fix pxp client credit until excel update */
5288 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5289 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5290 }
5292 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5293 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5295 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5296 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5298 /* PXPCS COMMON comes here */
5299 /* Reset PCIE errors for debug */
5300 REG_WR(bp, 0x2814, 0xffffffff);
5301 REG_WR(bp, 0x3820, 0xffffffff);
5303 /* EMAC0 COMMON comes here */
5304 /* EMAC1 COMMON comes here */
5305 /* DBU COMMON comes here */
5306 /* DBG COMMON comes here */
5308 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5309 if (CHIP_IS_E1H(bp)) {
5310 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5311 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5314 if (CHIP_REV_IS_SLOW(bp))
5315 msleep(200);
5317 /* finish CFC init */
5318 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5319 if (val != 1) {
5320 BNX2X_ERR("CFC LL_INIT failed\n");
5321 return -EBUSY;
5322 }
5323 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5324 if (val != 1) {
5325 BNX2X_ERR("CFC AC_INIT failed\n");
5326 return -EBUSY;
5327 }
5328 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5329 if (val != 1) {
5330 BNX2X_ERR("CFC CAM_INIT failed\n");
5331 return -EBUSY;
5332 }
5333 REG_WR(bp, CFC_REG_DEBUG0, 0);
5335 /* read NIG statistic
5336 to see if this is our first up since powerup */
5337 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5338 val = *bnx2x_sp(bp, wb_data[0]);
5340 /* do internal memory self test */
5341 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5342 BNX2X_ERR("internal mem self test failed\n");
5346 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5347 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5348 /* Fan failure is indicated by SPIO 5 */
5349 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5350 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5352 /* set to active low mode */
5353 val = REG_RD(bp, MISC_REG_SPIO_INT);
5354 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5355 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5356 REG_WR(bp, MISC_REG_SPIO_INT, val);
5358 /* enable interrupt to signal the IGU */
5359 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5360 val |= (1 << MISC_REGISTERS_SPIO_5);
5361 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5362 break;
5364 default:
5365 break;
5366 }
5368 /* clear PXP2 attentions */
5369 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5371 enable_blocks_attention(bp);
5373 if (bp->flags & TPA_ENABLE_FLAG) {
5374 struct tstorm_eth_tpa_exist tmp = {0};
5376 tmp.tpa_exist = 1;
5378 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5379 ((u32 *)&tmp)[0]);
5380 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5381 ((u32 *)&tmp)[1]);
5382 }
5384 return 0;
5385 }
5387 static int bnx2x_init_port(struct bnx2x *bp)
5388 {
5389 int port = BP_PORT(bp);
5390 u32 val;
5392 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5394 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5396 /* Port PXP comes here */
5397 /* Port PXP2 comes here */
5398 #ifdef BCM_ISCSI
5402 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5403 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5404 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5405 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5410 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5411 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5412 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5413 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5418 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5419 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5420 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5421 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5422 #endif
5423 /* Port CMs come here */
5425 /* Port QM comes here */
5426 #ifdef BCM_ISCSI
5427 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5428 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5430 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5431 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5432 #endif
5433 /* Port DQ comes here */
5434 /* Port BRB1 comes here */
5435 /* Port PRS comes here */
5436 /* Port TSDM comes here */
5437 /* Port CSDM comes here */
5438 /* Port USDM comes here */
5439 /* Port XSDM comes here */
5440 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5441 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5442 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5443 port ? USEM_PORT1_END : USEM_PORT0_END);
5444 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5445 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5446 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5447 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5448 /* Port UPB comes here */
5449 /* Port XPB comes here */
5451 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5452 port ? PBF_PORT1_END : PBF_PORT0_END);
5454 /* configure PBF to work without PAUSE mtu 9000 */
5455 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5457 /* update threshold */
5458 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5459 /* update init credit */
5460 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5463 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5464 msleep(5);
5465 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5467 #ifdef BCM_ISCSI
5468 /* tell the searcher where the T2 table is */
5469 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5471 wb_write[0] = U64_LO(bp->t2_mapping);
5472 wb_write[1] = U64_HI(bp->t2_mapping);
5473 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5474 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5475 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5476 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5478 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5479 /* Port SRCH comes here */
5480 #endif
5481 /* Port CDU comes here */
5482 /* Port CFC comes here */
5484 if (CHIP_IS_E1(bp)) {
5485 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5486 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5487 }
5488 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5489 port ? HC_PORT1_END : HC_PORT0_END);
5491 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5492 MISC_AEU_PORT0_START,
5493 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5494 /* init aeu_mask_attn_func_0/1:
5495 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5496 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5497 * bits 4-7 are used for "per vn group attention" */
5498 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5499 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5501 /* Port PXPCS comes here */
5502 /* Port EMAC0 comes here */
5503 /* Port EMAC1 comes here */
5504 /* Port DBU comes here */
5505 /* Port DBG comes here */
5506 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5507 port ? NIG_PORT1_END : NIG_PORT0_END);
5509 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5511 if (CHIP_IS_E1H(bp)) {
5513 struct cmng_struct_per_port m_cmng_port;
5514 u32 wsum;
5515 int vn;
5516 /* 0x2 disable e1hov, 0x1 enable */
5517 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5518 (IS_E1HMF(bp) ? 0x1 : 0x2));
5520 /* Init RATE SHAPING and FAIRNESS contexts.
5521 Initialize as if there is 10G link. */
5522 wsum = bnx2x_calc_vn_wsum(bp);
5523 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5524 if (IS_E1HMF(bp))
5525 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5526 bnx2x_init_vn_minmax(bp, 2*vn + port,
5527 wsum, 10000, &m_cmng_port);
5528 }
5530 /* Port MCP comes here */
5531 /* Port DMAE comes here */
5533 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5534 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5535 /* add SPIO 5 to group 0 */
5536 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5537 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5538 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5539 break;
5541 default:
5542 break;
5543 }
5545 bnx2x__link_reset(bp);
5547 return 0;
5548 }
5550 #define ILT_PER_FUNC (768/2)
5551 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5552 /* the phys address is shifted right 12 bits and has a 1=valid bit
5553 added to the 53rd bit;
5554 then since this is a wide register(TM)
5555 we split it into two 32 bit writes
5556 */
5557 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5558 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5559 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5560 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
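/*
 * Illustrative only: for a hypothetical table at physical address
 * 0x12345678000, ONCHIP_ADDR1 yields 0x12345678 (address bits 43..12)
 * and ONCHIP_ADDR2 yields 0x00100000 (the valid bit at position 20,
 * address bits 63..44 zero); the pair is then written as one wide
 * 64-bit ILT entry.
 */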
5562 #define CNIC_ILT_LINES 0
5564 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5565 {
5566 u32 reg;
5568 if (CHIP_IS_E1H(bp))
5569 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5571 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5573 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5574 }
5576 static int bnx2x_init_func(struct bnx2x *bp)
5577 {
5578 int port = BP_PORT(bp);
5579 int func = BP_FUNC(bp);
5580 int i;
5582 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5584 i = FUNC_ILT_BASE(func);
5586 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5587 if (CHIP_IS_E1H(bp)) {
5588 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5589 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5590 } else /* E1 */
5591 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5592 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5595 if (CHIP_IS_E1H(bp)) {
5596 for (i = 0; i < 9; i++)
5597 bnx2x_init_block(bp,
5598 cm_start[func][i], cm_end[func][i]);
5600 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5601 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5602 }
5604 /* HC init per function */
5605 if (CHIP_IS_E1H(bp)) {
5606 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5608 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5609 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5610 }
5611 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5613 if (CHIP_IS_E1H(bp))
5614 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5616 /* Reset PCIE errors for debug */
5617 REG_WR(bp, 0x2114, 0xffffffff);
5618 REG_WR(bp, 0x2120, 0xffffffff);
5620 return 0;
5621 }
5623 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5624 {
5625 int i, rc = 0;
5627 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5628 BP_FUNC(bp), load_code);
5630 bp->dmae_ready = 0;
5631 mutex_init(&bp->dmae_mutex);
5632 bnx2x_gunzip_init(bp);
5632 bnx2x_gunzip_init(bp);
5634 switch (load_code) {
5635 case FW_MSG_CODE_DRV_LOAD_COMMON:
5636 rc = bnx2x_init_common(bp);
5637 if (rc)
5638 goto init_hw_err;
5639 /* no break */
5641 case FW_MSG_CODE_DRV_LOAD_PORT:
5642 bp->dmae_ready = 1;
5643 rc = bnx2x_init_port(bp);
5644 if (rc)
5645 goto init_hw_err;
5646 /* no break */
5648 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5649 bp->dmae_ready = 1;
5650 rc = bnx2x_init_func(bp);
5651 if (rc)
5652 goto init_hw_err;
5653 break;
5655 default:
5656 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5657 break;
5658 }
5660 if (!BP_NOMCP(bp)) {
5661 int func = BP_FUNC(bp);
5663 bp->fw_drv_pulse_wr_seq =
5664 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5665 DRV_PULSE_SEQ_MASK);
5666 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5667 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5668 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5669 }
5672 /* this needs to be done before gunzip end */
5673 bnx2x_zero_def_sb(bp);
5674 for_each_queue(bp, i)
5675 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5677 init_hw_err:
5678 bnx2x_gunzip_end(bp);
5680 return rc;
5681 }
5683 /* send the MCP a request, block until there is a reply */
5684 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5685 {
5686 int func = BP_FUNC(bp);
5687 u32 seq = ++bp->fw_seq;
5688 u32 rc = 0;
5689 u32 cnt = 1;
5690 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5692 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5693 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5695 do {
5696 /* let the FW do its magic ... */
5697 msleep(delay);
5699 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5701 /* Give the FW up to 2 second (200*10ms) */
5702 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5704 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5705 cnt*delay, rc, seq);
5707 /* is this a reply to our command? */
5708 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5709 rc &= FW_MSG_CODE_MASK;
5711 } else {
5712 /* FW BUG! */
5713 BNX2X_ERR("FW failed to respond!\n");
5714 bnx2x_fw_dump(bp);
5715 rc = 0;
5716 }
5718 return rc;
5719 }
5721 static void bnx2x_free_mem(struct bnx2x *bp)
5722 {
5724 #define BNX2X_PCI_FREE(x, y, size) \
5725 do { \
5726 if (x) { \
5727 pci_free_consistent(bp->pdev, size, x, y); \
5728 x = NULL; \
5729 } \
5730 } while (0)
5733 #define BNX2X_FREE(x) \
5734 do { \
5735 if (x) { \
5736 vfree(x); \
5737 x = NULL; \
5738 } \
5739 } while (0)
5741 int i;
5743 /* fastpath */
5744 for_each_queue(bp, i) {
5747 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5748 bnx2x_fp(bp, i, status_blk_mapping),
5749 sizeof(struct host_status_block) +
5750 sizeof(struct eth_tx_db_data));
5752 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5753 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5754 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5755 bnx2x_fp(bp, i, tx_desc_mapping),
5756 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5758 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5760 bnx2x_fp(bp, i, rx_desc_mapping),
5761 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5764 bnx2x_fp(bp, i, rx_comp_mapping),
5765 sizeof(struct eth_fast_path_rx_cqe) *
5766 NUM_RCQ_BD);
5767 /* SGE ring */
5768 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5769 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5770 bnx2x_fp(bp, i, rx_sge_mapping),
5771 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5772 }
5773 /* end of fastpath */
5775 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5776 sizeof(struct host_def_status_block));
5778 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5779 sizeof(struct bnx2x_slowpath));
5781 #ifdef BCM_ISCSI
5782 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5783 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5784 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5785 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5786 #endif
5787 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5789 #undef BNX2X_PCI_FREE
5790 #undef BNX2X_KFREE
5791 }
5793 static int bnx2x_alloc_mem(struct bnx2x *bp)
5794 {
5796 #define BNX2X_PCI_ALLOC(x, y, size) \
5797 do { \
5798 x = pci_alloc_consistent(bp->pdev, size, y); \
5799 if (x == NULL) \
5800 goto alloc_mem_err; \
5801 memset(x, 0, size); \
5802 } while (0)
5804 #define BNX2X_ALLOC(x, size) \
5805 do { \
5806 x = vmalloc(size); \
5807 if (x == NULL) \
5808 goto alloc_mem_err; \
5809 memset(x, 0, size); \
5810 } while (0)
5812 int i;
5814 /* fastpath */
5815 for_each_queue(bp, i) {
5816 bnx2x_fp(bp, i, bp) = bp;
5819 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5820 &bnx2x_fp(bp, i, status_blk_mapping),
5821 sizeof(struct host_status_block) +
5822 sizeof(struct eth_tx_db_data));
5824 bnx2x_fp(bp, i, hw_tx_prods) =
5825 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5827 bnx2x_fp(bp, i, tx_prods_mapping) =
5828 bnx2x_fp(bp, i, status_blk_mapping) +
5829 sizeof(struct host_status_block);
5831 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5832 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5833 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5834 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5835 &bnx2x_fp(bp, i, tx_desc_mapping),
5836 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5838 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5839 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5840 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5841 &bnx2x_fp(bp, i, rx_desc_mapping),
5842 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5844 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5845 &bnx2x_fp(bp, i, rx_comp_mapping),
5846 sizeof(struct eth_fast_path_rx_cqe) *
5847 NUM_RCQ_BD);
5849 /* SGE ring */
5850 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5851 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5852 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5853 &bnx2x_fp(bp, i, rx_sge_mapping),
5854 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5855 }
5856 /* end of fastpath */
5858 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5859 sizeof(struct host_def_status_block));
5861 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5862 sizeof(struct bnx2x_slowpath));
5864 #ifdef BCM_ISCSI
5865 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5868 for (i = 0; i < 64*1024; i += 64) {
5869 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5870 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5871 }
5873 /* allocate searcher T2 table
5874 we allocate 1/4 of alloc num for T2
5875 (which is not entered into the ILT) */
5876 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5879 for (i = 0; i < 16*1024; i += 64)
5880 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5882 /* now fixup the last line in the block to point to the next block */
5883 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5885 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5886 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5888 /* QM queues (128*MAX_CONN) */
5889 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5890 #endif
5892 /* Slow path ring */
5893 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5895 return 0;
5897 alloc_mem_err:
5898 bnx2x_free_mem(bp);
5899 return -ENOMEM;
5901 #undef BNX2X_PCI_ALLOC
5902 #undef BNX2X_ALLOC
5903 }
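/*
 * Both helper macros bail to alloc_mem_err on failure, so a partially
 * built memory map is unwound by the single bnx2x_free_mem() call in
 * that path; the free-side macros tolerate NULL pointers, which is what
 * makes this goto-cleanup pattern safe.
 */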
5905 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5906 {
5907 int i;
5909 for_each_queue(bp, i) {
5910 struct bnx2x_fastpath *fp = &bp->fp[i];
5912 u16 bd_cons = fp->tx_bd_cons;
5913 u16 sw_prod = fp->tx_pkt_prod;
5914 u16 sw_cons = fp->tx_pkt_cons;
5916 while (sw_cons != sw_prod) {
5917 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5918 sw_cons++;
5919 }
5920 }
5921 }
5923 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5924 {
5925 int i, j;
5927 for_each_queue(bp, j) {
5928 struct bnx2x_fastpath *fp = &bp->fp[j];
5930 for (i = 0; i < NUM_RX_BD; i++) {
5931 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5932 struct sk_buff *skb = rx_buf->skb;
5934 if (skb == NULL)
5935 continue;
5937 pci_unmap_single(bp->pdev,
5938 pci_unmap_addr(rx_buf, mapping),
5939 bp->rx_buf_use_size,
5940 PCI_DMA_FROMDEVICE);
5942 rx_buf->skb = NULL;
5943 dev_kfree_skb(skb);
5944 }
5945 if (!fp->disable_tpa)
5946 bnx2x_free_tpa_pool(bp, fp,
5947 ETH_MAX_AGGREGATION_QUEUES_E1H);
5948 }
5949 }
5951 static void bnx2x_free_skbs(struct bnx2x *bp)
5952 {
5953 bnx2x_free_tx_skbs(bp);
5954 bnx2x_free_rx_skbs(bp);
5955 }
5957 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5958 {
5959 int i, offset = 1;
5961 free_irq(bp->msix_table[0].vector, bp->dev);
5962 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5963 bp->msix_table[0].vector);
5965 for_each_queue(bp, i) {
5966 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5967 "state %x\n", i, bp->msix_table[i + offset].vector,
5968 bnx2x_fp(bp, i, state));
5970 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5971 BNX2X_ERR("IRQ of fp #%d being freed while "
5972 "state != closed\n", i);
5974 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5975 }
5976 }
5978 static void bnx2x_free_irq(struct bnx2x *bp)
5979 {
5980 if (bp->flags & USING_MSIX_FLAG) {
5981 bnx2x_free_msix_irqs(bp);
5982 pci_disable_msix(bp->pdev);
5983 bp->flags &= ~USING_MSIX_FLAG;
5985 } else
5986 free_irq(bp->pdev->irq, bp->dev);
5987 }
5989 static int bnx2x_enable_msix(struct bnx2x *bp)
5990 {
5991 int i, rc, offset = 0;
5993 bp->msix_table[0].entry = 0;
5994 offset = 1;
5995 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5997 for_each_queue(bp, i) {
5998 int igu_vec = offset + i + BP_L_ID(bp);
6000 bp->msix_table[i + offset].entry = igu_vec;
6001 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6002 "(fastpath #%u)\n", i + offset, igu_vec, i);
6005 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6006 bp->num_queues + offset);
6008 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6011 bp->flags |= USING_MSIX_FLAG;
6016 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6017 {
6018 int i, rc, offset = 1;
6020 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6021 bp->dev->name, bp->dev);
6023 BNX2X_ERR("request sp irq failed\n");
6027 for_each_queue(bp, i) {
6028 rc = request_irq(bp->msix_table[i + offset].vector,
6029 bnx2x_msix_fp_int, 0,
6030 bp->dev->name, &bp->fp[i]);
6032 BNX2X_ERR("request fp #%d irq failed rc %d\n",
6034 bnx2x_free_msix_irqs(bp);
6038 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6044 static int bnx2x_req_irq(struct bnx2x *bp)
6045 {
6046 int rc;
6048 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6049 bp->dev->name, bp->dev);
6050 if (!rc)
6051 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6053 return rc;
6054 }
6056 /****************************************************************************
6057 * Init service functions
6058 ****************************************************************************/
6060 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6061 {
6062 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6063 int port = BP_PORT(bp);
6065 /* CAM allocation
6066 * unicasts 0-31:port0 32-63:port1
6067 * multicast 64-127:port0 128-191:port1
6068 */
6069 config->hdr.length_6b = 2;
6070 config->hdr.offset = port ? 31 : 0;
6071 config->hdr.client_id = BP_CL_ID(bp);
6072 config->hdr.reserved1 = 0;
6074 /* primary MAC */
6075 config->config_table[0].cam_entry.msb_mac_addr =
6076 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6077 config->config_table[0].cam_entry.middle_mac_addr =
6078 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6079 config->config_table[0].cam_entry.lsb_mac_addr =
6080 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6081 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6082 config->config_table[0].target_table_entry.flags = 0;
6083 config->config_table[0].target_table_entry.client_id = 0;
6084 config->config_table[0].target_table_entry.vlan_id = 0;
6086 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6087 config->config_table[0].cam_entry.msb_mac_addr,
6088 config->config_table[0].cam_entry.middle_mac_addr,
6089 config->config_table[0].cam_entry.lsb_mac_addr);
6091 /* broadcast MAC */
6092 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6093 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6094 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6095 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6096 config->config_table[1].target_table_entry.flags =
6097 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6098 config->config_table[1].target_table_entry.client_id = 0;
6099 config->config_table[1].target_table_entry.vlan_id = 0;
6101 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6102 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6103 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6104 }
6106 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6107 {
6108 struct mac_configuration_cmd_e1h *config =
6109 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6111 if (bp->state != BNX2X_STATE_OPEN) {
6112 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6116 /* CAM allocation for E1H
6117 * unicasts: by func number
6118 * multicast: 20+FUNC*20, 20 each
6119 */
6120 config->hdr.length_6b = 1;
6121 config->hdr.offset = BP_FUNC(bp);
6122 config->hdr.client_id = BP_CL_ID(bp);
6123 config->hdr.reserved1 = 0;
6125 /* primary MAC */
6126 config->config_table[0].msb_mac_addr =
6127 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6128 config->config_table[0].middle_mac_addr =
6129 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6130 config->config_table[0].lsb_mac_addr =
6131 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6132 config->config_table[0].client_id = BP_L_ID(bp);
6133 config->config_table[0].vlan_id = 0;
6134 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6135 config->config_table[0].flags = BP_PORT(bp);
6137 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6138 config->config_table[0].msb_mac_addr,
6139 config->config_table[0].middle_mac_addr,
6140 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6142 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6143 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6144 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6145 }
6147 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6148 int *state_p, int poll)
6149 {
6150 /* can take a while if any port is running */
6151 int cnt = 500;
6153 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6154 poll ? "polling" : "waiting", state, idx);
6156 might_sleep();
6157 while (cnt--) {
6158 if (poll) {
6159 bnx2x_rx_int(bp->fp, 10);
6160 /* if index is different from 0
6161 * the reply for some commands will
6162 * be on the non-default queue
6163 */
6164 if (idx)
6165 bnx2x_rx_int(&bp->fp[idx], 10);
6166 }
6167 mb(); /* state is changed by bnx2x_sp_event() */
6169 if (*state_p == state)
6170 return 0;
6172 msleep(1);
6173 }
6175 /* timeout! */
6176 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6177 poll ? "polling" : "waiting", state, idx);
6178 #ifdef BNX2X_STOP_ON_ERROR
6179 bnx2x_panic();
6180 #endif
6182 return -EBUSY;
6183 }
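/*
 * Typical use (see bnx2x_setup_leading() below): post a ramrod, then
 * wait for the slowpath completion handler to flip the watched state,
 * e.g. bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &bp->state, 0).
 */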
6185 static int bnx2x_setup_leading(struct bnx2x *bp)
6186 {
6187 int rc;
6189 /* reset IGU state */
6190 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6192 /* SETUP ramrod */
6193 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6195 /* Wait for completion */
6196 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6198 return rc;
6199 }
6201 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6202 {
6203 /* reset IGU state */
6204 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6207 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6208 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6210 /* Wait for completion */
6211 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6212 &(bp->fp[index].state), 0);
6215 static int bnx2x_poll(struct napi_struct *napi, int budget);
6216 static void bnx2x_set_rx_mode(struct net_device *dev);
6218 /* must be called with rtnl_lock */
6219 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6220 {
6221 u32 load_code;
6222 int i, rc;
6224 #ifdef BNX2X_STOP_ON_ERROR
6225 if (unlikely(bp->panic))
6226 return -EPERM;
6227 #endif
6229 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6231 /* Send LOAD_REQUEST command to MCP
6232 Returns the type of LOAD command:
6233 if it is the first port to be initialized
6234 common blocks should be initialized, otherwise - not
6235 */
6236 if (!BP_NOMCP(bp)) {
6237 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6238 if (!load_code) {
6239 BNX2X_ERR("MCP response failure, aborting\n");
6240 return -EBUSY;
6241 }
6242 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6243 return -EBUSY; /* other port in diagnostic mode */
6245 } else {
6246 int port = BP_PORT(bp);
6248 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6249 load_count[0], load_count[1], load_count[2]);
6250 load_count[0]++;
6251 load_count[1 + port]++;
6252 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6253 load_count[0], load_count[1], load_count[2]);
6254 if (load_count[0] == 1)
6255 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6256 else if (load_count[1 + port] == 1)
6257 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6259 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6260 }
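/*
 * Without an MCP, load_count[] = {common, port0, port1} emulates the
 * firmware's arbitration: the first function loaded on the chip does
 * COMMON init, the first on its port does PORT init, and every later
 * function does FUNCTION init only.
 */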
6262 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6263 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6264 bp->port.pmf = 1;
6265 else
6266 bp->port.pmf = 0;
6267 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6269 /* if we can't use MSI-X we only need one fp,
6270 * so try to enable MSI-X with the requested number of fp's
6271 * and fallback to inta with one fp
6272 */
6273 if (use_inta) {
6274 bp->num_queues = 1;
6275 } else {
6277 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6278 /* user requested number */
6279 bp->num_queues = use_multi;
6281 else if (use_multi)
6282 bp->num_queues = min_t(u32, num_online_cpus(),
6283 BP_MAX_QUEUES(bp));
6284 else
6285 bp->num_queues = 1;
6287 if (bnx2x_enable_msix(bp)) {
6288 /* failed to enable MSI-X */
6289 bp->num_queues = 1;
6290 if (use_multi)
6291 BNX2X_ERR("Multi requested but failed"
6292 " to enable MSI-X\n");
6293 }
6294 }
6295 DP(NETIF_MSG_IFUP,
6296 "set number of queues to %d\n", bp->num_queues);
6298 if (bnx2x_alloc_mem(bp))
6299 return -ENOMEM;
6301 for_each_queue(bp, i)
6302 bnx2x_fp(bp, i, disable_tpa) =
6303 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6305 if (bp->flags & USING_MSIX_FLAG) {
6306 rc = bnx2x_req_msix_irqs(bp);
6307 if (rc) {
6308 pci_disable_msix(bp->pdev);
6309 goto load_error;
6310 }
6311 } else {
6313 rc = bnx2x_req_irq(bp);
6314 if (rc) {
6315 BNX2X_ERR("IRQ request failed, aborting\n");
6316 goto load_error;
6317 }
6318 }
6320 for_each_queue(bp, i)
6321 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6322 bnx2x_poll, 128);
6325 rc = bnx2x_init_hw(bp, load_code);
6327 BNX2X_ERR("HW init failed, aborting\n");
6331 /* Setup NIC internals and enable interrupts */
6332 bnx2x_nic_init(bp, load_code);
6334 /* Send LOAD_DONE command to MCP */
6335 if (!BP_NOMCP(bp)) {
6336 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6337 if (!load_code) {
6338 BNX2X_ERR("MCP response failure, aborting\n");
6339 rc = -EBUSY;
6340 goto load_int_disable;
6341 }
6342 }
6344 bnx2x_stats_init(bp);
6346 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6348 /* Enable Rx interrupt handling before sending the ramrod
6349 as it's completed on Rx FP queue */
6350 for_each_queue(bp, i)
6351 napi_enable(&bnx2x_fp(bp, i, napi));
6353 /* Enable interrupt handling */
6354 atomic_set(&bp->intr_sem, 0);
6356 rc = bnx2x_setup_leading(bp);
6358 BNX2X_ERR("Setup leading failed!\n");
6359 goto load_stop_netif;
6362 if (CHIP_IS_E1H(bp))
6363 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6364 BNX2X_ERR("!!! mf_cfg function disabled\n");
6365 bp->state = BNX2X_STATE_DISABLED;
6368 if (bp->state == BNX2X_STATE_OPEN)
6369 for_each_nondefault_queue(bp, i) {
6370 rc = bnx2x_setup_multi(bp, i);
6372 goto load_stop_netif;
6376 bnx2x_set_mac_addr_e1(bp);
6378 bnx2x_set_mac_addr_e1h(bp);
6381 bnx2x_initial_phy_init(bp);
6383 /* Start fast path */
6384 switch (load_mode) {
6386 /* Tx queue should only be re-enabled */
6387 netif_wake_queue(bp->dev);
6388 bnx2x_set_rx_mode(bp->dev);
6392 netif_start_queue(bp->dev);
6393 bnx2x_set_rx_mode(bp->dev);
6394 if (bp->flags & USING_MSIX_FLAG)
6395 printk(KERN_INFO PFX "%s: using MSI-X\n",
6400 bnx2x_set_rx_mode(bp->dev);
6401 bp->state = BNX2X_STATE_DIAG;
6409 bnx2x__link_status_update(bp);
6411 /* start the timer */
6412 mod_timer(&bp->timer, jiffies + bp->current_interval);
6418 for_each_queue(bp, i)
6419 napi_disable(&bnx2x_fp(bp, i, napi));
6422 bnx2x_int_disable_sync(bp);
6427 /* Free SKBs, SGEs, TPA pool and driver internals */
6428 bnx2x_free_skbs(bp);
6429 for_each_queue(bp, i)
6430 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6431 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6435 /* TBD we really need to reset the chip
6436 if we want to recover from this */
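/* For illustration only: the bookkeeping used in bnx2x_nic_load() above
 * when the MCP is not available.  load_count[0] counts all loaded
 * functions and load_count[1 + port] counts functions per port; the first
 * load overall must initialize the common blocks, the first load on a
 * port must initialize the port.  A minimal sketch, not built with the
 * driver; example_load_code() is hypothetical.
 */
#if 0
static u32 example_load_code(int port)
{
	load_count[0]++;
	load_count[1 + port]++;
	if (load_count[0] == 1)			/* first function overall */
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (load_count[1 + port] == 1)		/* first on this port */
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;	/* everything already set up */
}
#endif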
6440 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6444 /* halt the connection */
6445 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6446 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6448 /* Wait for completion */
6449 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6450 &(bp->fp[index].state), 1);
6451 if (rc) /* timeout */
6454 /* delete cfc entry */
6455 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6457 /* Wait for completion */
6458 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6459 &(bp->fp[index].state), 1);
6463 static int bnx2x_stop_leading(struct bnx2x *bp)
6465 u16 dsb_sp_prod_idx;
6466 /* if the other port is handling traffic,
6467 this can take a lot of time */
6473 /* Send HALT ramrod */
6474 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6475 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6477 /* Wait for completion */
6478 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6479 &(bp->fp[0].state), 1);
6480 if (rc) /* timeout */
6483 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6485 /* Send PORT_DELETE ramrod */
6486 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6488 /* Wait for completion to arrive on default status block
6489 we are going to reset the chip anyway
6490 so there is not much to do if this times out
6492 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6495 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6496 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6497 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6498 #ifdef BNX2X_STOP_ON_ERROR
6508 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6509 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6514 static void bnx2x_reset_func(struct bnx2x *bp)
6516 int port = BP_PORT(bp);
6517 int func = BP_FUNC(bp);
6521 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6522 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6524 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6527 base = FUNC_ILT_BASE(func);
6528 for (i = base; i < base + ILT_PER_FUNC; i++)
6529 bnx2x_ilt_wr(bp, i, 0);
6532 static void bnx2x_reset_port(struct bnx2x *bp)
6534 int port = BP_PORT(bp);
6537 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6539 /* Do not rcv packets to BRB */
6540 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6541 /* Do not direct rcv packets that are not for MCP to the BRB */
6542 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6543 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6546 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6549 /* Check for BRB port occupancy */
6550 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6552 DP(NETIF_MSG_IFDOWN,
6553 "BRB1 is not empty %d blooks are occupied\n", val);
6555 /* TODO: Close Doorbell port? */
6558 static void bnx2x_reset_common(struct bnx2x *bp)
6561 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6563 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6566 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6568 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6569 BP_FUNC(bp), reset_code);
6571 switch (reset_code) {
6572 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6573 bnx2x_reset_port(bp);
6574 bnx2x_reset_func(bp);
6575 bnx2x_reset_common(bp);
6578 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6579 bnx2x_reset_port(bp);
6580 bnx2x_reset_func(bp);
6583 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6584 bnx2x_reset_func(bp);
6588 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6593 /* must be called with rtnl_lock */
6594 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6596 int port = BP_PORT(bp);
6600 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6602 bp->rx_mode = BNX2X_RX_MODE_NONE;
6603 bnx2x_set_storm_rx_mode(bp);
6605 if (netif_running(bp->dev)) {
6606 netif_tx_disable(bp->dev);
6607 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6610 del_timer_sync(&bp->timer);
6611 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6612 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6613 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6615 /* Wait until tx fast path tasks complete */
6616 for_each_queue(bp, i) {
6617 struct bnx2x_fastpath *fp = &bp->fp[i];
6621 while (BNX2X_HAS_TX_WORK(fp)) {
6623 if (!netif_running(bp->dev))
6624 bnx2x_tx_int(fp, 1000);
6627 BNX2X_ERR("timeout waiting for queue[%d]\n",
6629 #ifdef BNX2X_STOP_ON_ERROR
6642 /* Give HW time to discard old tx messages */
6645 for_each_queue(bp, i)
6646 napi_disable(&bnx2x_fp(bp, i, napi));
6647 /* Disable interrupts after Tx and Rx are disabled on stack level */
6648 bnx2x_int_disable_sync(bp);
6653 if (unload_mode == UNLOAD_NORMAL)
6654 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6656 else if (bp->flags & NO_WOL_FLAG) {
6657 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6658 if (CHIP_IS_E1H(bp))
6659 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6661 } else if (bp->wol) {
6662 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6663 u8 *mac_addr = bp->dev->dev_addr;
6665 /* The mac address is written to entries 1-4 to
6666 preserve entry 0 which is used by the PMF */
6667 u8 entry = (BP_E1HVN(bp) + 1)*8;
6669 val = (mac_addr[0] << 8) | mac_addr[1];
6670 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6672 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6673 (mac_addr[4] << 8) | mac_addr[5];
6674 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6676 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6679 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6681 if (CHIP_IS_E1H(bp))
6682 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6684 /* Close multi and leading connections
6685 Completions for ramrods are collected in a synchronous way */
6686 for_each_nondefault_queue(bp, i)
6687 if (bnx2x_stop_multi(bp, i))
6690 rc = bnx2x_stop_leading(bp);
6692 BNX2X_ERR("Stop leading failed!\n");
6693 #ifdef BNX2X_STOP_ON_ERROR
6702 reset_code = bnx2x_fw_command(bp, reset_code);
6704 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6705 load_count[0], load_count[1], load_count[2]);
6707 load_count[1 + port]--;
6708 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6709 load_count[0], load_count[1], load_count[2]);
6710 if (load_count[0] == 0)
6711 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6712 else if (load_count[1 + port] == 0)
6713 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6715 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6718 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6719 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6720 bnx2x__link_reset(bp);
6722 /* Reset the chip */
6723 bnx2x_reset_chip(bp, reset_code);
6725 /* Report UNLOAD_DONE to MCP */
6727 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6729 /* Free SKBs, SGEs, TPA pool and driver internals */
6730 bnx2x_free_skbs(bp);
6731 for_each_queue(bp, i)
6732 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6733 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6736 bp->state = BNX2X_STATE_CLOSED;
6738 netif_carrier_off(bp->dev);
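/* For illustration only: how the WoL path in bnx2x_nic_unload() above
 * packs a 6-byte MAC into the two 32-bit EMAC_REG_EMAC_MAC_MATCH words --
 * bytes 0-1 go to the first word, bytes 2-5 to the second.  A minimal
 * sketch, not built with the driver; example_pack_mac() is hypothetical.
 */
#if 0
static void example_pack_mac(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}
#endif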
6743 static void bnx2x_reset_task(struct work_struct *work)
6745 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6747 #ifdef BNX2X_STOP_ON_ERROR
6748 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6749 " so reset not done to allow debug dump,\n"
6750 KERN_ERR " you will need to reboot when done\n");
6756 if (!netif_running(bp->dev))
6757 goto reset_task_exit;
6759 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6760 bnx2x_nic_load(bp, LOAD_NORMAL);
6766 /* end of nic load/unload */
6771 * Init service functions
6774 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6778 /* Check if there is any driver already loaded */
6779 val = REG_RD(bp, MISC_REG_UNPREPARED);
6781 /* Check if it is the UNDI driver
6782 * UNDI driver initializes CID offset for normal bell to 0x7
6784 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6785 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6787 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6789 int func = BP_FUNC(bp);
6793 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6795 /* try unload UNDI on port 0 */
6798 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6799 DRV_MSG_SEQ_NUMBER_MASK);
6800 reset_code = bnx2x_fw_command(bp, reset_code);
6802 /* if UNDI is loaded on the other port */
6803 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6805 /* send "DONE" for previous unload */
6806 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6808 /* unload UNDI on port 1 */
6811 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6812 DRV_MSG_SEQ_NUMBER_MASK);
6813 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6815 bnx2x_fw_command(bp, reset_code);
6818 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6819 HC_REG_CONFIG_0), 0x1000);
6821 /* close input traffic and wait for it */
6822 /* Do not rcv packets to BRB */
6824 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6825 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6826 /* Do not direct rcv packets that are not for MCP to
6829 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6830 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6833 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6834 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6837 /* save NIG port swap info */
6838 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6839 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6842 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6845 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6847 /* take the NIG out of reset and restore swap values */
6849 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6850 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6851 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6852 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6854 /* send unload done to the MCP */
6855 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6857 /* restore our func and fw_seq */
6860 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6861 DRV_MSG_SEQ_NUMBER_MASK);
6863 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6867 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6869 u32 val, val2, val3, val4, id;
6871 /* Get the chip revision id and number. */
6872 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6873 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6874 id = ((val & 0xffff) << 16);
6875 val = REG_RD(bp, MISC_REG_CHIP_REV);
6876 id |= ((val & 0xf) << 12);
6877 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6878 id |= ((val & 0xff) << 4);
6879 id |= (REG_RD(bp, MISC_REG_BOND_ID) & 0xf);
6881 bp->common.chip_id = id;
6882 bp->link_params.chip_id = bp->common.chip_id;
6883 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6885 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6886 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6887 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6888 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6889 bp->common.flash_size, bp->common.flash_size);
6891 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6892 bp->link_params.shmem_base = bp->common.shmem_base;
6893 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6895 if (!bp->common.shmem_base ||
6896 (bp->common.shmem_base < 0xA0000) ||
6897 (bp->common.shmem_base >= 0xC0000)) {
6898 BNX2X_DEV_INFO("MCP not active\n");
6899 bp->flags |= NO_MCP_FLAG;
6903 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6904 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6905 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6906 BNX2X_ERR("BAD MCP validity signature\n");
6908 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6909 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6911 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6912 bp->common.hw_config, bp->common.board);
6914 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6915 SHARED_HW_CFG_LED_MODE_MASK) >>
6916 SHARED_HW_CFG_LED_MODE_SHIFT);
6918 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6919 bp->common.bc_ver = val;
6920 BNX2X_DEV_INFO("bc_ver %X\n", val);
6921 if (val < BNX2X_BC_VER) {
6922 /* for now only warn
6923 * later we might need to enforce this */
6924 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6925 " please upgrade BC\n", BNX2X_BC_VER, val);
6927 BNX2X_DEV_INFO("%sWoL Capable\n",
6928 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6930 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6931 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6932 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6933 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6935 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6936 val, val2, val3, val4);
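/* For illustration only: the chip_id layout assembled above --
 * num:16-31, rev:12-15, metal:4-11, bond_id:0-3.  A minimal sketch, not
 * built with the driver; example_chip_id() is hypothetical.
 */
#if 0
static u32 example_chip_id(u32 num, u32 rev, u32 metal, u32 bond_id)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond_id & 0xf);
}
#endif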
6939 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6942 int port = BP_PORT(bp);
6945 switch (switch_cfg) {
6947 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6950 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6951 switch (ext_phy_type) {
6952 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6953 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6956 bp->port.supported |= (SUPPORTED_10baseT_Half |
6957 SUPPORTED_10baseT_Full |
6958 SUPPORTED_100baseT_Half |
6959 SUPPORTED_100baseT_Full |
6960 SUPPORTED_1000baseT_Full |
6961 SUPPORTED_2500baseX_Full |
6966 SUPPORTED_Asym_Pause);
6969 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6970 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6973 bp->port.supported |= (SUPPORTED_10baseT_Half |
6974 SUPPORTED_10baseT_Full |
6975 SUPPORTED_100baseT_Half |
6976 SUPPORTED_100baseT_Full |
6977 SUPPORTED_1000baseT_Full |
6982 SUPPORTED_Asym_Pause);
6986 BNX2X_ERR("NVRAM config error. "
6987 "BAD SerDes ext_phy_config 0x%x\n",
6988 bp->link_params.ext_phy_config);
6992 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6994 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6997 case SWITCH_CFG_10G:
6998 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7001 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7002 switch (ext_phy_type) {
7003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7004 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7007 bp->port.supported |= (SUPPORTED_10baseT_Half |
7008 SUPPORTED_10baseT_Full |
7009 SUPPORTED_100baseT_Half |
7010 SUPPORTED_100baseT_Full |
7011 SUPPORTED_1000baseT_Full |
7012 SUPPORTED_2500baseX_Full |
7013 SUPPORTED_10000baseT_Full |
7018 SUPPORTED_Asym_Pause);
7021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7022 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7025 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7028 SUPPORTED_Asym_Pause);
7031 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7032 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7035 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7036 SUPPORTED_1000baseT_Full |
7039 SUPPORTED_Asym_Pause);
7042 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7043 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7046 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7047 SUPPORTED_1000baseT_Full |
7051 SUPPORTED_Asym_Pause);
7054 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7055 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7058 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7059 SUPPORTED_2500baseX_Full |
7060 SUPPORTED_1000baseT_Full |
7064 SUPPORTED_Asym_Pause);
7067 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7068 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7071 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7075 SUPPORTED_Asym_Pause);
7078 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7079 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7080 bp->link_params.ext_phy_config);
7084 BNX2X_ERR("NVRAM config error. "
7085 "BAD XGXS ext_phy_config 0x%x\n",
7086 bp->link_params.ext_phy_config);
7090 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7092 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7097 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7098 bp->port.link_config);
7101 bp->link_params.phy_addr = bp->port.phy_addr;
7103 /* mask what we support according to speed_cap_mask */
7104 if (!(bp->link_params.speed_cap_mask &
7105 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7106 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7108 if (!(bp->link_params.speed_cap_mask &
7109 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7110 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7112 if (!(bp->link_params.speed_cap_mask &
7113 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7114 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7116 if (!(bp->link_params.speed_cap_mask &
7117 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7118 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7120 if (!(bp->link_params.speed_cap_mask &
7121 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7122 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7123 SUPPORTED_1000baseT_Full);
7125 if (!(bp->link_params.speed_cap_mask &
7126 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7127 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7129 if (!(bp->link_params.speed_cap_mask &
7130 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7131 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7133 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7136 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7138 bp->link_params.req_duplex = DUPLEX_FULL;
7140 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7141 case PORT_FEATURE_LINK_SPEED_AUTO:
7142 if (bp->port.supported & SUPPORTED_Autoneg) {
7143 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7144 bp->port.advertising = bp->port.supported;
7147 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7149 if ((ext_phy_type ==
7150 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7153 /* force 10G, no AN */
7154 bp->link_params.req_line_speed = SPEED_10000;
7155 bp->port.advertising =
7156 (ADVERTISED_10000baseT_Full |
7160 BNX2X_ERR("NVRAM config error. "
7161 "Invalid link_config 0x%x"
7162 " Autoneg not supported\n",
7163 bp->port.link_config);
7168 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7169 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7170 bp->link_params.req_line_speed = SPEED_10;
7171 bp->port.advertising = (ADVERTISED_10baseT_Full |
7174 BNX2X_ERR("NVRAM config error. "
7175 "Invalid link_config 0x%x"
7176 " speed_cap_mask 0x%x\n",
7177 bp->port.link_config,
7178 bp->link_params.speed_cap_mask);
7183 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7184 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7185 bp->link_params.req_line_speed = SPEED_10;
7186 bp->link_params.req_duplex = DUPLEX_HALF;
7187 bp->port.advertising = (ADVERTISED_10baseT_Half |
7190 BNX2X_ERR("NVRAM config error. "
7191 "Invalid link_config 0x%x"
7192 " speed_cap_mask 0x%x\n",
7193 bp->port.link_config,
7194 bp->link_params.speed_cap_mask);
7199 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7200 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7201 bp->link_params.req_line_speed = SPEED_100;
7202 bp->port.advertising = (ADVERTISED_100baseT_Full |
7205 BNX2X_ERR("NVRAM config error. "
7206 "Invalid link_config 0x%x"
7207 " speed_cap_mask 0x%x\n",
7208 bp->port.link_config,
7209 bp->link_params.speed_cap_mask);
7214 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7215 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7216 bp->link_params.req_line_speed = SPEED_100;
7217 bp->link_params.req_duplex = DUPLEX_HALF;
7218 bp->port.advertising = (ADVERTISED_100baseT_Half |
7221 BNX2X_ERR("NVRAM config error. "
7222 "Invalid link_config 0x%x"
7223 " speed_cap_mask 0x%x\n",
7224 bp->port.link_config,
7225 bp->link_params.speed_cap_mask);
7230 case PORT_FEATURE_LINK_SPEED_1G:
7231 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7232 bp->link_params.req_line_speed = SPEED_1000;
7233 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7236 BNX2X_ERR("NVRAM config error. "
7237 "Invalid link_config 0x%x"
7238 " speed_cap_mask 0x%x\n",
7239 bp->port.link_config,
7240 bp->link_params.speed_cap_mask);
7245 case PORT_FEATURE_LINK_SPEED_2_5G:
7246 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7247 bp->link_params.req_line_speed = SPEED_2500;
7248 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7251 BNX2X_ERR("NVRAM config error. "
7252 "Invalid link_config 0x%x"
7253 " speed_cap_mask 0x%x\n",
7254 bp->port.link_config,
7255 bp->link_params.speed_cap_mask);
7260 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7261 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7262 case PORT_FEATURE_LINK_SPEED_10G_KR:
7263 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7264 bp->link_params.req_line_speed = SPEED_10000;
7265 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7268 BNX2X_ERR("NVRAM config error. "
7269 "Invalid link_config 0x%x"
7270 " speed_cap_mask 0x%x\n",
7271 bp->port.link_config,
7272 bp->link_params.speed_cap_mask);
7278 BNX2X_ERR("NVRAM config error. "
7279 "BAD link speed link_config 0x%x\n",
7280 bp->port.link_config);
7281 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7282 bp->port.advertising = bp->port.supported;
7286 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7287 PORT_FEATURE_FLOW_CONTROL_MASK);
7288 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7289 !(bp->port.supported & SUPPORTED_Autoneg))
7290 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7292 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7293 " advertising 0x%x\n",
7294 bp->link_params.req_line_speed,
7295 bp->link_params.req_duplex,
7296 bp->link_params.req_flow_ctrl, bp->port.advertising);
7299 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7301 int port = BP_PORT(bp);
7304 bp->link_params.bp = bp;
7305 bp->link_params.port = port;
7307 bp->link_params.serdes_config =
7308 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7309 bp->link_params.lane_config =
7310 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7311 bp->link_params.ext_phy_config =
7313 dev_info.port_hw_config[port].external_phy_config);
7314 bp->link_params.speed_cap_mask =
7316 dev_info.port_hw_config[port].speed_capability_mask);
7318 bp->port.link_config =
7319 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7321 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7322 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7323 " link_config 0x%08x\n",
7324 bp->link_params.serdes_config,
7325 bp->link_params.lane_config,
7326 bp->link_params.ext_phy_config,
7327 bp->link_params.speed_cap_mask, bp->port.link_config);
7329 bp->link_params.switch_cfg = (bp->port.link_config &
7330 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7331 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7333 bnx2x_link_settings_requested(bp);
7335 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7336 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7337 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7338 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7339 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7340 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7341 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7342 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7343 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7344 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
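/* For illustration only: the shmem MAC layout decoded above -- the upper
 * word carries bytes 0-1 in its low 16 bits, the lower word carries
 * bytes 2-5.  A minimal sketch, not built with the driver;
 * example_unpack_mac() is hypothetical.
 */
#if 0
static void example_unpack_mac(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (upper >> 8) & 0xff;
	mac[1] = upper & 0xff;
	mac[2] = (lower >> 24) & 0xff;
	mac[3] = (lower >> 16) & 0xff;
	mac[4] = (lower >> 8) & 0xff;
	mac[5] = lower & 0xff;
}
#endif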
7347 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7349 int func = BP_FUNC(bp);
7353 bnx2x_get_common_hwinfo(bp);
7357 if (CHIP_IS_E1H(bp)) {
7359 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7362 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7363 FUNC_MF_CFG_E1HOV_TAG_MASK);
7364 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7368 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7370 func, bp->e1hov, bp->e1hov);
7372 BNX2X_DEV_INFO("Single function mode\n");
7374 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7375 " aborting\n", func);
7381 if (!BP_NOMCP(bp)) {
7382 bnx2x_get_port_hwinfo(bp);
7384 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7385 DRV_MSG_SEQ_NUMBER_MASK);
7386 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7390 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7391 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7392 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7393 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7394 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7395 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7396 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7397 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7398 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7399 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7400 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7402 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7410 /* only supposed to happen on emulation/FPGA */
7411 BNX2X_ERR("warning rendom MAC workaround active\n");
7412 random_ether_addr(bp->dev->dev_addr);
7413 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7419 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7421 int func = BP_FUNC(bp);
7424 /* Disable interrupt handling until HW is initialized */
7425 atomic_set(&bp->intr_sem, 1);
7427 mutex_init(&bp->port.phy_mutex);
7429 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7430 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7432 rc = bnx2x_get_hwinfo(bp);
7434 /* need to reset chip if undi was active */
7436 bnx2x_undi_unload(bp);
7438 if (CHIP_REV_IS_FPGA(bp))
7439 printk(KERN_ERR PFX "FPGA detected\n");
7441 if (BP_NOMCP(bp) && (func == 0))
7443 "MCP disabled, must load devices in order!\n");
7447 bp->flags &= ~TPA_ENABLE_FLAG;
7448 bp->dev->features &= ~NETIF_F_LRO;
7450 bp->flags |= TPA_ENABLE_FLAG;
7451 bp->dev->features |= NETIF_F_LRO;
7455 bp->tx_ring_size = MAX_TX_AVAIL;
7456 bp->rx_ring_size = MAX_RX_AVAIL;
7464 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7465 bp->current_interval = (poll ? poll : bp->timer_interval);
7467 init_timer(&bp->timer);
7468 bp->timer.expires = jiffies + bp->current_interval;
7469 bp->timer.data = (unsigned long) bp;
7470 bp->timer.function = bnx2x_timer;
7476 * ethtool service functions
7479 /* All ethtool functions called with rtnl_lock */
7481 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7483 struct bnx2x *bp = netdev_priv(dev);
7485 cmd->supported = bp->port.supported;
7486 cmd->advertising = bp->port.advertising;
7488 if (netif_carrier_ok(dev)) {
7489 cmd->speed = bp->link_vars.line_speed;
7490 cmd->duplex = bp->link_vars.duplex;
7492 cmd->speed = bp->link_params.req_line_speed;
7493 cmd->duplex = bp->link_params.req_duplex;
7498 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7499 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7500 if (vn_max_rate < cmd->speed)
7501 cmd->speed = vn_max_rate;
7504 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7506 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7508 switch (ext_phy_type) {
7509 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7510 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7511 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7512 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7513 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7514 cmd->port = PORT_FIBRE;
7517 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7518 cmd->port = PORT_TP;
7521 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7522 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7523 bp->link_params.ext_phy_config);
7527 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7528 bp->link_params.ext_phy_config);
7532 cmd->port = PORT_TP;
7534 cmd->phy_address = bp->port.phy_addr;
7535 cmd->transceiver = XCVR_INTERNAL;
7537 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7538 cmd->autoneg = AUTONEG_ENABLE;
7540 cmd->autoneg = AUTONEG_DISABLE;
7545 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7546 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7547 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7548 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7549 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7550 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7551 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7556 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7558 struct bnx2x *bp = netdev_priv(dev);
7564 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7565 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7566 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7567 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7568 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7569 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7570 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7572 if (cmd->autoneg == AUTONEG_ENABLE) {
7573 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7574 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7578 /* advertise the requested speed and duplex if supported */
7579 cmd->advertising &= bp->port.supported;
7581 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7582 bp->link_params.req_duplex = DUPLEX_FULL;
7583 bp->port.advertising |= (ADVERTISED_Autoneg |
7586 } else { /* forced speed */
7587 /* advertise the requested speed and duplex if supported */
7588 switch (cmd->speed) {
7590 if (cmd->duplex == DUPLEX_FULL) {
7591 if (!(bp->port.supported &
7592 SUPPORTED_10baseT_Full)) {
7594 "10M full not supported\n");
7598 advertising = (ADVERTISED_10baseT_Full |
7601 if (!(bp->port.supported &
7602 SUPPORTED_10baseT_Half)) {
7604 "10M half not supported\n");
7608 advertising = (ADVERTISED_10baseT_Half |
7614 if (cmd->duplex == DUPLEX_FULL) {
7615 if (!(bp->port.supported &
7616 SUPPORTED_100baseT_Full)) {
7618 "100M full not supported\n");
7622 advertising = (ADVERTISED_100baseT_Full |
7625 if (!(bp->port.supported &
7626 SUPPORTED_100baseT_Half)) {
7628 "100M half not supported\n");
7632 advertising = (ADVERTISED_100baseT_Half |
7638 if (cmd->duplex != DUPLEX_FULL) {
7639 DP(NETIF_MSG_LINK, "1G half not supported\n");
7643 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7644 DP(NETIF_MSG_LINK, "1G full not supported\n");
7648 advertising = (ADVERTISED_1000baseT_Full |
7653 if (cmd->duplex != DUPLEX_FULL) {
7655 "2.5G half not supported\n");
7659 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7661 "2.5G full not supported\n");
7665 advertising = (ADVERTISED_2500baseX_Full |
7670 if (cmd->duplex != DUPLEX_FULL) {
7671 DP(NETIF_MSG_LINK, "10G half not supported\n");
7675 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7676 DP(NETIF_MSG_LINK, "10G full not supported\n");
7680 advertising = (ADVERTISED_10000baseT_Full |
7685 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7689 bp->link_params.req_line_speed = cmd->speed;
7690 bp->link_params.req_duplex = cmd->duplex;
7691 bp->port.advertising = advertising;
7694 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7695 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7696 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7697 bp->port.advertising);
7699 if (netif_running(dev)) {
7700 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7707 #define PHY_FW_VER_LEN 10
7709 static void bnx2x_get_drvinfo(struct net_device *dev,
7710 struct ethtool_drvinfo *info)
7712 struct bnx2x *bp = netdev_priv(dev);
7713 char phy_fw_ver[PHY_FW_VER_LEN];
7715 strcpy(info->driver, DRV_MODULE_NAME);
7716 strcpy(info->version, DRV_MODULE_VERSION);
7718 phy_fw_ver[0] = '\0';
7720 bnx2x_acquire_phy_lock(bp);
7721 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7722 (bp->state != BNX2X_STATE_CLOSED),
7723 phy_fw_ver, PHY_FW_VER_LEN);
7724 bnx2x_release_phy_lock(bp);
7727 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7728 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7729 BCM_5710_FW_REVISION_VERSION,
7730 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7731 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7732 strcpy(info->bus_info, pci_name(bp->pdev));
7733 info->n_stats = BNX2X_NUM_STATS;
7734 info->testinfo_len = BNX2X_NUM_TESTS;
7735 info->eedump_len = bp->common.flash_size;
7736 info->regdump_len = 0;
7739 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7741 struct bnx2x *bp = netdev_priv(dev);
7743 if (bp->flags & NO_WOL_FLAG) {
7747 wol->supported = WAKE_MAGIC;
7749 wol->wolopts = WAKE_MAGIC;
7753 memset(&wol->sopass, 0, sizeof(wol->sopass));
7756 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7758 struct bnx2x *bp = netdev_priv(dev);
7760 if (wol->wolopts & ~WAKE_MAGIC)
7763 if (wol->wolopts & WAKE_MAGIC) {
7764 if (bp->flags & NO_WOL_FLAG)
7774 static u32 bnx2x_get_msglevel(struct net_device *dev)
7776 struct bnx2x *bp = netdev_priv(dev);
7778 return bp->msglevel;
7781 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7783 struct bnx2x *bp = netdev_priv(dev);
7785 if (capable(CAP_NET_ADMIN))
7786 bp->msglevel = level;
7789 static int bnx2x_nway_reset(struct net_device *dev)
7791 struct bnx2x *bp = netdev_priv(dev);
7796 if (netif_running(dev)) {
7797 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7804 static int bnx2x_get_eeprom_len(struct net_device *dev)
7806 struct bnx2x *bp = netdev_priv(dev);
7808 return bp->common.flash_size;
7811 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7813 int port = BP_PORT(bp);
7817 /* adjust timeout for emulation/FPGA */
7818 count = NVRAM_TIMEOUT_COUNT;
7819 if (CHIP_REV_IS_SLOW(bp))
7822 /* request access to nvram interface */
7823 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7824 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7826 for (i = 0; i < count*10; i++) {
7827 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7828 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7834 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7835 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7842 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7844 int port = BP_PORT(bp);
7848 /* adjust timeout for emulation/FPGA */
7849 count = NVRAM_TIMEOUT_COUNT;
7850 if (CHIP_REV_IS_SLOW(bp))
7853 /* relinquish nvram interface */
7854 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7855 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7857 for (i = 0; i < count*10; i++) {
7858 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7859 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7865 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7866 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7873 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7877 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7879 /* enable both bits, even on read */
7880 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7881 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7882 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7885 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7889 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7891 /* disable both bits, even after read */
7892 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7893 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7894 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7897 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7903 /* build the command word */
7904 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7906 /* need to clear DONE bit separately */
7907 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7909 /* address of the NVRAM to read from */
7910 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7911 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7913 /* issue a read command */
7914 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7916 /* adjust timeout for emulation/FPGA */
7917 count = NVRAM_TIMEOUT_COUNT;
7918 if (CHIP_REV_IS_SLOW(bp))
7921 /* wait for completion */
7924 for (i = 0; i < count; i++) {
7926 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7928 if (val & MCPR_NVM_COMMAND_DONE) {
7929 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7930 /* we read nvram data in cpu order
7931 * but ethtool sees it as an array of bytes
7932 * converting to big-endian will do the work */
7933 val = cpu_to_be32(val);
7943 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7950 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7952 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7957 if (offset + buf_size > bp->common.flash_size) {
7958 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7959 " buf_size (0x%x) > flash_size (0x%x)\n",
7960 offset, buf_size, bp->common.flash_size);
7964 /* request access to nvram interface */
7965 rc = bnx2x_acquire_nvram_lock(bp);
7969 /* enable access to nvram interface */
7970 bnx2x_enable_nvram_access(bp);
7972 /* read the first word(s) */
7973 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7974 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7975 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7976 memcpy(ret_buf, &val, 4);
7978 /* advance to the next dword */
7979 offset += sizeof(u32);
7980 ret_buf += sizeof(u32);
7981 buf_size -= sizeof(u32);
7986 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7987 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7988 memcpy(ret_buf, &val, 4);
7991 /* disable access to nvram interface */
7992 bnx2x_disable_nvram_access(bp);
7993 bnx2x_release_nvram_lock(bp);
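/* For illustration only: why bnx2x_nvram_read() stores each dword with
 * cpu_to_be32() -- ethtool wants the flash contents as a plain byte
 * array, and storing big-endian reproduces the on-flash byte order on
 * either host endianness.  A minimal sketch, not built with the driver;
 * example_store_dword() is hypothetical.
 */
#if 0
static void example_store_dword(u32 cpu_val, u8 *out)
{
	__be32 be = cpu_to_be32(cpu_val);	/* flash byte 0 ends up first */

	memcpy(out, &be, sizeof(be));
}
#endif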
7998 static int bnx2x_get_eeprom(struct net_device *dev,
7999 struct ethtool_eeprom *eeprom, u8 *eebuf)
8001 struct bnx2x *bp = netdev_priv(dev);
8004 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8005 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8006 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8007 eeprom->len, eeprom->len);
8009 /* parameters already validated in ethtool_get_eeprom */
8011 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8016 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8021 /* build the command word */
8022 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8024 /* need to clear DONE bit separately */
8025 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8027 /* write the data */
8028 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8030 /* address of the NVRAM to write to */
8031 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8032 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8034 /* issue the write command */
8035 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8037 /* adjust timeout for emulation/FPGA */
8038 count = NVRAM_TIMEOUT_COUNT;
8039 if (CHIP_REV_IS_SLOW(bp))
8042 /* wait for completion */
8044 for (i = 0; i < count; i++) {
8046 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8047 if (val & MCPR_NVM_COMMAND_DONE) {
8056 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8058 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8066 if (offset + buf_size > bp->common.flash_size) {
8067 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8068 " buf_size (0x%x) > flash_size (0x%x)\n",
8069 offset, buf_size, bp->common.flash_size);
8073 /* request access to nvram interface */
8074 rc = bnx2x_acquire_nvram_lock(bp);
8078 /* enable access to nvram interface */
8079 bnx2x_enable_nvram_access(bp);
8081 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8082 align_offset = (offset & ~0x03);
8083 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8086 val &= ~(0xff << BYTE_OFFSET(offset));
8087 val |= (*data_buf << BYTE_OFFSET(offset));
8089 /* nvram data is returned as an array of bytes
8090 * convert it back to cpu order */
8091 val = be32_to_cpu(val);
8093 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8097 /* disable access to nvram interface */
8098 bnx2x_disable_nvram_access(bp);
8099 bnx2x_release_nvram_lock(bp);
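/* For illustration only: the read-modify-write done by
 * bnx2x_nvram_write1() -- to change a single byte, the containing dword
 * is read, the byte is patched in at BYTE_OFFSET(offset), and the dword
 * is written back.  A minimal sketch, not built with the driver;
 * example_patch_byte() is hypothetical.
 */
#if 0
static u32 example_patch_byte(u32 dword, u32 offset, u8 byte)
{
	u32 shift = 8 * (offset & 0x03);	/* same as BYTE_OFFSET() */

	dword &= ~(0xff << shift);
	dword |= ((u32)byte << shift);
	return dword;
}
#endif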
8104 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8112 if (buf_size == 1) /* ethtool */
8113 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8115 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8117 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8122 if (offset + buf_size > bp->common.flash_size) {
8123 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8124 " buf_size (0x%x) > flash_size (0x%x)\n",
8125 offset, buf_size, bp->common.flash_size);
8129 /* request access to nvram interface */
8130 rc = bnx2x_acquire_nvram_lock(bp);
8134 /* enable access to nvram interface */
8135 bnx2x_enable_nvram_access(bp);
8138 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8139 while ((written_so_far < buf_size) && (rc == 0)) {
8140 if (written_so_far == (buf_size - sizeof(u32)))
8141 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8142 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8143 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8144 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8145 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8147 memcpy(&val, data_buf, 4);
8149 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8151 /* advance to the next dword */
8152 offset += sizeof(u32);
8153 data_buf += sizeof(u32);
8154 written_so_far += sizeof(u32);
8158 /* disable access to nvram interface */
8159 bnx2x_disable_nvram_access(bp);
8160 bnx2x_release_nvram_lock(bp);
8165 static int bnx2x_set_eeprom(struct net_device *dev,
8166 struct ethtool_eeprom *eeprom, u8 *eebuf)
8168 struct bnx2x *bp = netdev_priv(dev);
8171 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8172 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8173 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8174 eeprom->len, eeprom->len);
8176 /* parameters already validated in ethtool_set_eeprom */
8178 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8179 if (eeprom->magic == 0x00504859)
8182 bnx2x_acquire_phy_lock(bp);
8183 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8184 bp->link_params.ext_phy_config,
8185 (bp->state != BNX2X_STATE_CLOSED),
8186 eebuf, eeprom->len);
8187 if ((bp->state == BNX2X_STATE_OPEN) ||
8188 (bp->state == BNX2X_STATE_DISABLED)) {
8189 rc |= bnx2x_link_reset(&bp->link_params,
8191 rc |= bnx2x_phy_init(&bp->link_params,
8194 bnx2x_release_phy_lock(bp);
8196 } else /* Only the PMF can access the PHY */
8199 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8204 static int bnx2x_get_coalesce(struct net_device *dev,
8205 struct ethtool_coalesce *coal)
8207 struct bnx2x *bp = netdev_priv(dev);
8209 memset(coal, 0, sizeof(struct ethtool_coalesce));
8211 coal->rx_coalesce_usecs = bp->rx_ticks;
8212 coal->tx_coalesce_usecs = bp->tx_ticks;
8217 static int bnx2x_set_coalesce(struct net_device *dev,
8218 struct ethtool_coalesce *coal)
8220 struct bnx2x *bp = netdev_priv(dev);
8222 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8223 if (bp->rx_ticks > 3000)
8224 bp->rx_ticks = 3000;
8226 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8227 if (bp->tx_ticks > 0x3000)
8228 bp->tx_ticks = 0x3000;
8230 if (netif_running(dev))
8231 bnx2x_update_coalesce(bp);
8236 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8238 struct bnx2x *bp = netdev_priv(dev);
8242 if (data & ETH_FLAG_LRO) {
8243 if (!(dev->features & NETIF_F_LRO)) {
8244 dev->features |= NETIF_F_LRO;
8245 bp->flags |= TPA_ENABLE_FLAG;
8249 } else if (dev->features & NETIF_F_LRO) {
8250 dev->features &= ~NETIF_F_LRO;
8251 bp->flags &= ~TPA_ENABLE_FLAG;
8255 if (changed && netif_running(dev)) {
8256 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8257 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8263 static void bnx2x_get_ringparam(struct net_device *dev,
8264 struct ethtool_ringparam *ering)
8266 struct bnx2x *bp = netdev_priv(dev);
8268 ering->rx_max_pending = MAX_RX_AVAIL;
8269 ering->rx_mini_max_pending = 0;
8270 ering->rx_jumbo_max_pending = 0;
8272 ering->rx_pending = bp->rx_ring_size;
8273 ering->rx_mini_pending = 0;
8274 ering->rx_jumbo_pending = 0;
8276 ering->tx_max_pending = MAX_TX_AVAIL;
8277 ering->tx_pending = bp->tx_ring_size;
8280 static int bnx2x_set_ringparam(struct net_device *dev,
8281 struct ethtool_ringparam *ering)
8283 struct bnx2x *bp = netdev_priv(dev);
8286 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8287 (ering->tx_pending > MAX_TX_AVAIL) ||
8288 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8291 bp->rx_ring_size = ering->rx_pending;
8292 bp->tx_ring_size = ering->tx_pending;
8294 if (netif_running(dev)) {
8295 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8296 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8302 static void bnx2x_get_pauseparam(struct net_device *dev,
8303 struct ethtool_pauseparam *epause)
8305 struct bnx2x *bp = netdev_priv(dev);
8307 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8308 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8310 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8312 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8315 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8316 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8317 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8320 static int bnx2x_set_pauseparam(struct net_device *dev,
8321 struct ethtool_pauseparam *epause)
8323 struct bnx2x *bp = netdev_priv(dev);
8328 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8329 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8330 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8332 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8334 if (epause->rx_pause)
8335 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8337 if (epause->tx_pause)
8338 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8340 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8341 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8343 if (epause->autoneg) {
8344 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8345 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8349 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8350 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8354 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8356 if (netif_running(dev)) {
8357 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8364 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8366 struct bnx2x *bp = netdev_priv(dev);
8371 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8373 struct bnx2x *bp = netdev_priv(dev);
8379 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8382 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8383 dev->features |= NETIF_F_TSO6;
8385 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8386 dev->features &= ~NETIF_F_TSO6;
8392 static const struct {
8393 char string[ETH_GSTRING_LEN];
8394 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8395 { "register_test (offline)" },
8396 { "memory_test (offline)" },
8397 { "loopback_test (offline)" },
8398 { "nvram_test (online)" },
8399 { "interrupt_test (online)" },
8400 { "link_test (online)" },
8401 { "idle check (online)" },
8402 { "MC errors (online)" }
8405 static int bnx2x_self_test_count(struct net_device *dev)
8407 return BNX2X_NUM_TESTS;
8410 static int bnx2x_test_registers(struct bnx2x *bp)
8412 int idx, i, rc = -ENODEV;
8414 static const struct {
8419 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8420 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8421 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8422 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8423 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8424 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8425 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8426 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8427 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8428 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8429 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8430 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8431 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8432 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8433 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8434 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8435 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8436 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8437 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8438 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8439 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8440 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8441 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8442 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8443 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8444 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8445 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8446 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8447 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8448 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8449 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8450 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8451 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8452 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8453 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8454 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8455 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8456 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8458 { 0xffffffff, 0, 0x00000000 }
8461 if (!netif_running(bp->dev))
8464 /* Repeat the test twice:
8465 First by writing 0x00000000, second by writing 0xffffffff */
8466 for (idx = 0; idx < 2; idx++) {
8473 wr_val = 0xffffffff;
8477 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8478 u32 offset, mask, save_val, val;
8479 int port = BP_PORT(bp);
8481 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8482 mask = reg_tbl[i].mask;
8484 save_val = REG_RD(bp, offset);
8486 REG_WR(bp, offset, wr_val);
8487 val = REG_RD(bp, offset);
8489 /* Restore the original register's value */
8490 REG_WR(bp, offset, save_val);
8492 /* verify that the value is as expected */
8493 if ((val & mask) != (wr_val & mask))
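/* For illustration only: the write/readback/restore step applied above to
 * each entry of reg_tbl[] -- only the bits in 'mask' are expected to
 * stick, and the original value is always restored.  A minimal sketch,
 * not built with the driver; example_reg_test_one() is hypothetical.
 */
#if 0
static int example_reg_test_one(struct bnx2x *bp, u32 offset, u32 mask,
				u32 wr_val)
{
	u32 save_val = REG_RD(bp, offset);
	u32 val;

	REG_WR(bp, offset, wr_val);
	val = REG_RD(bp, offset);
	REG_WR(bp, offset, save_val);		/* always restore */

	return ((val & mask) != (wr_val & mask)) ? -ENODEV : 0;
}
#endif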
8504 static int bnx2x_test_memory(struct bnx2x *bp)
8506 int i, j, rc = -ENODEV;
8508 static const struct {
8512 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8513 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8514 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8515 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8516 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8517 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8518 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8522 static const struct {
8527 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8528 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8529 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8530 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8531 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8532 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8534 { NULL, 0xffffffff, 0 }
8537 if (!netif_running(bp->dev))
8540 /* Go through all the memories */
8541 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8542 for (j = 0; j < mem_tbl[i].size; j++)
8543 REG_RD(bp, mem_tbl[i].offset + j*4);
8545 /* Check the parity status */
8546 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8547 val = REG_RD(bp, prty_tbl[i].offset);
8548 if (val & ~(prty_tbl[i].mask)) {
8550 "%s is 0x%x\n", prty_tbl[i].name, val);
8561 static void bnx2x_netif_start(struct bnx2x *bp)
8565 if (atomic_dec_and_test(&bp->intr_sem)) {
8566 if (netif_running(bp->dev)) {
8567 bnx2x_int_enable(bp);
8568 for_each_queue(bp, i)
8569 napi_enable(&bnx2x_fp(bp, i, napi));
8570 if (bp->state == BNX2X_STATE_OPEN)
8571 netif_wake_queue(bp->dev);
8576 static void bnx2x_netif_stop(struct bnx2x *bp)
8580 if (netif_running(bp->dev)) {
8581 netif_tx_disable(bp->dev);
8582 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8583 for_each_queue(bp, i)
8584 napi_disable(&bnx2x_fp(bp, i, napi));
8586 bnx2x_int_disable_sync(bp);
8589 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8594 while (bnx2x_link_test(bp) && cnt--)
8598 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8600 unsigned int pkt_size, num_pkts, i;
8601 struct sk_buff *skb;
8602 unsigned char *packet;
8603 struct bnx2x_fastpath *fp = &bp->fp[0];
8604 u16 tx_start_idx, tx_idx;
8605 u16 rx_start_idx, rx_idx;
8607 struct sw_tx_bd *tx_buf;
8608 struct eth_tx_bd *tx_bd;
8610 union eth_rx_cqe *cqe;
8612 struct sw_rx_bd *rx_buf;
8616 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8617 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8618 bnx2x_acquire_phy_lock(bp);
8619 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8620 bnx2x_release_phy_lock(bp);
8622 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8623 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8624 bnx2x_acquire_phy_lock(bp);
8625 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8626 bnx2x_release_phy_lock(bp);
8627 /* wait until link state is restored */
8628 bnx2x_wait_for_link(bp, link_up);
8634 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8637 goto test_loopback_exit;
8639 packet = skb_put(skb, pkt_size);
8640 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8641 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8642 for (i = ETH_HLEN; i < pkt_size; i++)
8643 packet[i] = (unsigned char) (i & 0xff);
8646 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8647 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8649 pkt_prod = fp->tx_pkt_prod++;
8650 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8651 tx_buf->first_bd = fp->tx_bd_prod;
8654 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8655 mapping = pci_map_single(bp->pdev, skb->data,
8656 skb_headlen(skb), PCI_DMA_TODEVICE);
8657 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8658 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8659 tx_bd->nbd = cpu_to_le16(1);
8660 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8661 tx_bd->vlan = cpu_to_le16(pkt_prod);
8662 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8663 ETH_TX_BD_FLAGS_END_BD);
8664 tx_bd->general_data = ((UNICAST_ADDRESS <<
8665 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8667 fp->hw_tx_prods->bds_prod =
8668 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8669 mb(); /* FW restriction: must not reorder writing nbd and packets */
8670 fp->hw_tx_prods->packets_prod =
8671 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8672 DOORBELL(bp, FP_IDX(fp), 0);
8678 bp->dev->trans_start = jiffies;
8682 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8683 if (tx_idx != tx_start_idx + num_pkts)
8684 goto test_loopback_exit;
8686 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8687 if (rx_idx != rx_start_idx + num_pkts)
8688 goto test_loopback_exit;
8690 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8691 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8692 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8693 goto test_loopback_rx_exit;
8695 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8696 if (len != pkt_size)
8697 goto test_loopback_rx_exit;
8699 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8701 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8702 for (i = ETH_HLEN; i < pkt_size; i++)
8703 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8704 goto test_loopback_rx_exit;
8708 test_loopback_rx_exit:
8709 bp->dev->last_rx = jiffies;
8711 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8712 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8713 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8714 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8716 /* Update producers */
8717 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8719 mmiowb(); /* keep prod updates ordered */
8722 bp->link_params.loopback_mode = LOOPBACK_NONE;
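/* For illustration only: the self-describing payload used by the loopback
 * test above -- byte i carries (i & 0xff), so the receive side can verify
 * the frame without keeping a copy of it.  A minimal sketch, not built
 * with the driver; the example_* helpers are hypothetical.
 */
#if 0
static void example_fill_payload(u8 *pkt, unsigned int size)
{
	unsigned int i;

	for (i = ETH_HLEN; i < size; i++)
		pkt[i] = (u8)(i & 0xff);
}

static int example_check_payload(const u8 *pkt, unsigned int size)
{
	unsigned int i;

	for (i = ETH_HLEN; i < size; i++)
		if (pkt[i] != (u8)(i & 0xff))
			return -EIO;		/* payload corrupted */
	return 0;
}
#endif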
8727 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8731 if (!netif_running(bp->dev))
8732 return BNX2X_LOOPBACK_FAILED;
8734 bnx2x_netif_stop(bp);
8736 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8737 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8738 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8741 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8742 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8743 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8746 bnx2x_netif_start(bp);
8751 #define CRC32_RESIDUAL 0xdebb20e3
8753 static int bnx2x_test_nvram(struct bnx2x *bp)
8755 static const struct {
8759 { 0, 0x14 }, /* bootstrap */
8760 { 0x14, 0xec }, /* dir */
8761 { 0x100, 0x350 }, /* manuf_info */
8762 { 0x450, 0xf0 }, /* feature_info */
8763 { 0x640, 0x64 }, /* upgrade_key_info */
8765 { 0x708, 0x70 }, /* manuf_key_info */
8770 u8 *data = (u8 *)buf;
8774 rc = bnx2x_nvram_read(bp, 0, data, 4);
8776 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8777 goto test_nvram_exit;
8780 magic = be32_to_cpu(buf[0]);
8781 if (magic != 0x669955aa) {
8782 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8783 rc = -ENODEV;
8784 goto test_nvram_exit;
8787 for (i = 0; nvram_tbl[i].size; i++) {
8789 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8790 nvram_tbl[i].size);
8791 if (rc) {
8792 DP(NETIF_MSG_PROBE,
8793 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8794 goto test_nvram_exit;
8797 csum = ether_crc_le(nvram_tbl[i].size, data);
8798 if (csum != CRC32_RESIDUAL) {
8799 DP(NETIF_MSG_PROBE,
8800 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8801 rc = -ENODEV;
8802 goto test_nvram_exit;
8806 test_nvram_exit:
8807 return rc;
8810 static int bnx2x_test_intr(struct bnx2x *bp)
8812 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8813 int i, rc;
8815 if (!netif_running(bp->dev))
8816 return -ENODEV;
8818 config->hdr.length_6b = 0;
8819 config->hdr.offset = 0;
8820 config->hdr.client_id = BP_CL_ID(bp);
8821 config->hdr.reserved1 = 0;
8823 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8824 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8825 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8826 if (rc == 0) {
8827 bp->set_mac_pending++;
8828 for (i = 0; i < 10; i++) {
8829 if (!bp->set_mac_pending)
8830 break;
8831 msleep_interruptible(10);
8833 if (bp->set_mac_pending)
8834 rc = -ENODEV;
8837 return rc;
8840 static void bnx2x_self_test(struct net_device *dev,
8841 struct ethtool_test *etest, u64 *buf)
8843 struct bnx2x *bp = netdev_priv(dev);
8845 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8847 if (!netif_running(dev))
8848 return;
8850 /* offline tests are not supported in MF mode */
8851 if (IS_E1HMF(bp))
8852 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8854 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8855 u8 link_up;
8857 link_up = bp->link_vars.link_up;
8858 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8859 bnx2x_nic_load(bp, LOAD_DIAG);
8860 /* wait until link state is restored */
8861 bnx2x_wait_for_link(bp, link_up);
8863 if (bnx2x_test_registers(bp) != 0) {
8864 buf[0] = 1;
8865 etest->flags |= ETH_TEST_FL_FAILED;
8867 if (bnx2x_test_memory(bp) != 0) {
8868 buf[1] = 1;
8869 etest->flags |= ETH_TEST_FL_FAILED;
8871 buf[2] = bnx2x_test_loopback(bp, link_up);
8872 if (buf[2] != 0)
8873 etest->flags |= ETH_TEST_FL_FAILED;
8875 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8876 bnx2x_nic_load(bp, LOAD_NORMAL);
8877 /* wait until link state is restored */
8878 bnx2x_wait_for_link(bp, link_up);
8880 if (bnx2x_test_nvram(bp) != 0) {
8881 buf[3] = 1;
8882 etest->flags |= ETH_TEST_FL_FAILED;
8884 if (bnx2x_test_intr(bp) != 0) {
8885 buf[4] = 1;
8886 etest->flags |= ETH_TEST_FL_FAILED;
8889 if (bnx2x_link_test(bp) != 0) {
8890 buf[5] = 1;
8891 etest->flags |= ETH_TEST_FL_FAILED;
8893 buf[7] = bnx2x_mc_assert(bp);
8894 if (buf[7] != 0)
8895 etest->flags |= ETH_TEST_FL_FAILED;
8897 #ifdef BNX2X_EXTRA_DEBUG
8898 bnx2x_panic_dump(bp);
8899 #endif
8902 static const struct {
8903 long offset;
8904 int size;
8905 u32 flags;
8906 #define STATS_FLAGS_PORT 1
8907 #define STATS_FLAGS_FUNC 2
8908 u8 string[ETH_GSTRING_LEN];
8909 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8910 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8911 8, STATS_FLAGS_FUNC, "rx_bytes" },
8912 { STATS_OFFSET32(error_bytes_received_hi),
8913 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8914 { STATS_OFFSET32(total_bytes_transmitted_hi),
8915 8, STATS_FLAGS_FUNC, "tx_bytes" },
8916 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8917 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8918 { STATS_OFFSET32(total_unicast_packets_received_hi),
8919 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8920 { STATS_OFFSET32(total_multicast_packets_received_hi),
8921 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8922 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8923 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8924 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8925 8, STATS_FLAGS_FUNC, "tx_packets" },
8926 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8927 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8928 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8929 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8930 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8931 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8932 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8933 8, STATS_FLAGS_PORT, "rx_align_errors" },
8934 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8935 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8936 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8937 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8938 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8939 8, STATS_FLAGS_PORT, "tx_deferred" },
8940 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8941 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8942 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8943 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8944 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8945 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8946 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8947 8, STATS_FLAGS_PORT, "rx_fragments" },
8948 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8949 8, STATS_FLAGS_PORT, "rx_jabbers" },
8950 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8951 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8952 { STATS_OFFSET32(jabber_packets_received),
8953 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8954 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8955 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8956 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8957 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8958 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8959 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8960 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8961 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8962 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8963 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8964 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8965 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8966 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8967 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8968 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8969 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8970 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8971 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8972 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8973 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8974 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8975 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8976 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8977 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8978 { STATS_OFFSET32(mac_filter_discard),
8979 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8980 { STATS_OFFSET32(no_buff_discard),
8981 4, STATS_FLAGS_FUNC, "rx_discards" },
8982 { STATS_OFFSET32(xxoverflow_discard),
8983 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8984 { STATS_OFFSET32(brb_drop_hi),
8985 8, STATS_FLAGS_PORT, "brb_discard" },
8986 { STATS_OFFSET32(brb_truncate_hi),
8987 8, STATS_FLAGS_PORT, "brb_truncate" },
8988 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8989 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8990 { STATS_OFFSET32(rx_skb_alloc_failed),
8991 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8992 /* 42 */{ STATS_OFFSET32(hw_csum_err),
8993 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8996 #define IS_NOT_E1HMF_STAT(bp, i) \
8997 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
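/* In E1H multi-function mode the per-port MAC counters are shared by
 * several functions, so IS_NOT_E1HMF_STAT() hides every STATS_FLAGS_PORT
 * entry above from the string, count and value callbacks below.
 */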
8999 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9001 struct bnx2x *bp = netdev_priv(dev);
9002 int i, j;
9004 switch (stringset) {
9005 case ETH_SS_STATS:
9006 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9007 if (IS_NOT_E1HMF_STAT(bp, i))
9008 continue;
9009 strcpy(buf + j*ETH_GSTRING_LEN,
9010 bnx2x_stats_arr[i].string);
9011 j++;
9013 break;
9015 case ETH_SS_TEST:
9016 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9017 break;
9021 static int bnx2x_get_stats_count(struct net_device *dev)
9023 struct bnx2x *bp = netdev_priv(dev);
9024 int i, num_stats = 0;
9026 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9027 if (IS_NOT_E1HMF_STAT(bp, i))
9028 continue;
9029 num_stats++;
9031 return num_stats;
9034 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9035 struct ethtool_stats *stats, u64 *buf)
9037 struct bnx2x *bp = netdev_priv(dev);
9038 u32 *hw_stats = (u32 *)&bp->eth_stats;
9039 int i, j;
9041 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9042 if (IS_NOT_E1HMF_STAT(bp, i))
9043 continue;
9045 if (bnx2x_stats_arr[i].size == 0) {
9046 /* skip this counter */
9047 buf[j] = 0;
9048 j++;
9049 continue;
9051 if (bnx2x_stats_arr[i].size == 4) {
9052 /* 4-byte counter */
9053 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9054 j++;
9055 continue;
9057 /* 8-byte counter */
9058 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9059 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9060 j++;
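/* The 8-byte counters live in two adjacent 32-bit words with the high
 * word first (hence the *_hi naming above), so e.g. hi = 0x00000001,
 * lo = 0x00000002 is reported as 0x0000000100000002.
 */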
9064 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9066 struct bnx2x *bp = netdev_priv(dev);
9067 int port = BP_PORT(bp);
9068 int i;
9070 if (!netif_running(dev))
9071 return 0;
9076 if (data == 0)
9077 data = 2;
9079 for (i = 0; i < (data * 2); i++) {
9080 if ((i % 2) == 0)
9081 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9082 bp->link_params.hw_led_mode,
9083 bp->link_params.chip_id);
9084 else
9085 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9086 bp->link_params.hw_led_mode,
9087 bp->link_params.chip_id);
9089 msleep_interruptible(500);
9090 if (signal_pending(current))
9091 break;
9094 if (bp->link_vars.link_up)
9095 bnx2x_set_led(bp, port, LED_MODE_OPER,
9096 bp->link_vars.line_speed,
9097 bp->link_params.hw_led_mode,
9098 bp->link_params.chip_id);
9103 static struct ethtool_ops bnx2x_ethtool_ops = {
9104 .get_settings = bnx2x_get_settings,
9105 .set_settings = bnx2x_set_settings,
9106 .get_drvinfo = bnx2x_get_drvinfo,
9107 .get_wol = bnx2x_get_wol,
9108 .set_wol = bnx2x_set_wol,
9109 .get_msglevel = bnx2x_get_msglevel,
9110 .set_msglevel = bnx2x_set_msglevel,
9111 .nway_reset = bnx2x_nway_reset,
9112 .get_link = ethtool_op_get_link,
9113 .get_eeprom_len = bnx2x_get_eeprom_len,
9114 .get_eeprom = bnx2x_get_eeprom,
9115 .set_eeprom = bnx2x_set_eeprom,
9116 .get_coalesce = bnx2x_get_coalesce,
9117 .set_coalesce = bnx2x_set_coalesce,
9118 .get_ringparam = bnx2x_get_ringparam,
9119 .set_ringparam = bnx2x_set_ringparam,
9120 .get_pauseparam = bnx2x_get_pauseparam,
9121 .set_pauseparam = bnx2x_set_pauseparam,
9122 .get_rx_csum = bnx2x_get_rx_csum,
9123 .set_rx_csum = bnx2x_set_rx_csum,
9124 .get_tx_csum = ethtool_op_get_tx_csum,
9125 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9126 .set_flags = bnx2x_set_flags,
9127 .get_flags = ethtool_op_get_flags,
9128 .get_sg = ethtool_op_get_sg,
9129 .set_sg = ethtool_op_set_sg,
9130 .get_tso = ethtool_op_get_tso,
9131 .set_tso = bnx2x_set_tso,
9132 .self_test_count = bnx2x_self_test_count,
9133 .self_test = bnx2x_self_test,
9134 .get_strings = bnx2x_get_strings,
9135 .phys_id = bnx2x_phys_id,
9136 .get_stats_count = bnx2x_get_stats_count,
9137 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9140 /* end of ethtool_ops */
9142 /****************************************************************************
9143 * General service functions
9144 ****************************************************************************/
9146 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9148 u16 pmcsr;
9150 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9152 switch (state) {
9153 case PCI_D0:
9154 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9155 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9156 PCI_PM_CTRL_PME_STATUS));
9158 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9159 /* delay required during transition out of D3hot */
9160 msleep(20);
9161 break;
9163 case PCI_D3hot:
9164 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9165 pmcsr |= 3; /* D3hot */
9167 if (bp->wol)
9168 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9170 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9171 pmcsr);
9173 /* No more memory access after this point until
9174 * device is brought back to D0.
9175 */
9176 break;
9178 default:
9179 return -EINVAL;
9181 return 0;
9184 /****************************************************************************
9185 * net_device service functions
9186 ****************************************************************************/
9188 static int bnx2x_poll(struct napi_struct *napi, int budget)
9190 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9191 napi);
9192 struct bnx2x *bp = fp->bp;
9193 int work_done = 0;
9195 #ifdef BNX2X_STOP_ON_ERROR
9196 if (unlikely(bp->panic))
9197 goto poll_panic;
9198 #endif
9200 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9201 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9202 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9204 bnx2x_update_fpsb_idx(fp);
9206 if (BNX2X_HAS_TX_WORK(fp))
9207 bnx2x_tx_int(fp, budget);
9209 if (BNX2X_HAS_RX_WORK(fp))
9210 work_done = bnx2x_rx_int(fp, budget);
9212 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9214 /* must not complete if we consumed full budget */
9215 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9217 #ifdef BNX2X_STOP_ON_ERROR
9218 poll_panic:
9219 #endif
9220 netif_rx_complete(bp->dev, napi);
9222 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9223 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9224 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9225 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9228 return work_done;
9231 /* we split the first BD into headers and data BDs
9232 * to ease the pain of our fellow microcode engineers
9233 * we use one mapping for both BDs
9234 * So far this has only been observed to happen
9235 * in Other Operating Systems(TM)
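/* Illustrative numbers for the split below: with hlen = 66 and
 * skb_headlen() = 200, the start BD is trimmed to the 66 header bytes and
 * a new data BD points at mapping + 66 for the remaining 134 bytes - one
 * DMA mapping shared by two BDs, with the caller bumping nbd for the
 * extra BD.
 */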
9237 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9238 struct bnx2x_fastpath *fp,
9239 struct eth_tx_bd **tx_bd, u16 hlen,
9240 u16 bd_prod, int nbd)
9242 struct eth_tx_bd *h_tx_bd = *tx_bd;
9243 struct eth_tx_bd *d_tx_bd;
9244 dma_addr_t mapping;
9245 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9247 /* first fix first BD */
9248 h_tx_bd->nbd = cpu_to_le16(nbd);
9249 h_tx_bd->nbytes = cpu_to_le16(hlen);
9251 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9252 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9253 h_tx_bd->addr_lo, h_tx_bd->nbd);
9255 /* now get a new data BD
9256 * (after the pbd) and fill it */
9257 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9258 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9260 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9261 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9263 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9264 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9265 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9267 /* this marks the BD as one that has no individual mapping
9268 * the FW ignores this flag in a BD not marked start
9270 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9271 DP(NETIF_MSG_TX_QUEUED,
9272 "TSO split data size is %d (%x:%x)\n",
9273 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9275 /* update tx_bd for marking the last BD flag */
9276 *tx_bd = d_tx_bd;
9278 return bd_prod;
9281 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9283 if (fix > 0)
9284 csum = (u16) ~csum_fold(csum_sub(csum,
9285 csum_partial(t_header - fix, fix, 0)));
9287 else if (fix < 0)
9288 csum = (u16) ~csum_fold(csum_add(csum,
9289 csum_partial(t_header, -fix, 0)));
9291 return swab16(csum);
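/* Worked example for the fixup above, assuming the stack checksummed from
 * csum_start while the FW wants the sum from the transport header: with
 * fix > 0 the extra leading bytes' sum is subtracted out, with fix < 0
 * the missing bytes are added back, and the result is folded, inverted
 * and byte-swapped into the parsing BD's format.
 */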
9294 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9296 u32 rc;
9298 if (skb->ip_summed != CHECKSUM_PARTIAL)
9299 rc = XMIT_PLAIN;
9301 else {
9302 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9303 rc = XMIT_CSUM_V6;
9304 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9305 rc |= XMIT_CSUM_TCP;
9307 } else {
9308 rc = XMIT_CSUM_V4;
9309 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9310 rc |= XMIT_CSUM_TCP;
9314 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9315 rc |= XMIT_GSO_V4;
9317 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9318 rc |= XMIT_GSO_V6;
9320 return rc;
9323 /* check if packet requires linearization (packet is too fragmented) */
9324 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9325 u32 xmit_type)
9327 int to_copy = 0;
9328 int hlen = 0;
9329 int first_bd_sz = 0;
9331 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9332 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9334 if (xmit_type & XMIT_GSO) {
9335 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9336 /* Check if LSO packet needs to be copied:
9337 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9338 int wnd_size = MAX_FETCH_BD - 3;
9339 /* Number of windows to check */
9340 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9341 int wnd_idx = 0;
9342 int frag_idx = 0;
9343 u32 wnd_sum = 0;
9345 /* Headers length */
9346 hlen = (int)(skb_transport_header(skb) - skb->data) +
9347 tcp_hdrlen(skb);
9349 /* Amount of data (w/o headers) on linear part of SKB*/
9350 first_bd_sz = skb_headlen(skb) - hlen;
9352 wnd_sum = first_bd_sz;
9354 /* Calculate the first sum - it's special */
9355 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9356 wnd_sum +=
9357 skb_shinfo(skb)->frags[frag_idx].size;
9359 /* If there was data on linear skb data - check it */
9360 if (first_bd_sz > 0) {
9361 if (unlikely(wnd_sum < lso_mss)) {
9362 to_copy = 1;
9363 goto exit_lbl;
9366 wnd_sum -= first_bd_sz;
9369 /* Others are easier: run through the frag list and
9370 check all windows */
9371 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9372 wnd_sum +=
9373 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9375 if (unlikely(wnd_sum < lso_mss)) {
9376 to_copy = 1;
9377 break;
9379 wnd_sum -=
9380 skb_shinfo(skb)->frags[wnd_idx].size;
9384 /* in the non-LSO case, a too fragmented packet should always
9385 be linearized */
9386 to_copy = 1;
9389 exit_lbl:
9391 if (unlikely(to_copy))
9392 DP(NETIF_MSG_TX_QUEUED,
9393 "Linearization IS REQUIRED for %s packet. "
9394 "num_frags %d hlen %d first_bd_sz %d\n",
9395 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9396 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9398 return to_copy;
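/* Worked example, assuming MAX_FETCH_BD = 13 so wnd_size = 10: an LSO skb
 * with 12 frags gives num_wnds = 2, so three overlapping windows of 10
 * consecutive BDs are summed. If any window holds less than lso_mss
 * bytes, a single MSS could span more BDs than the FW can fetch, and the
 * skb is linearized instead.
 */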
9401 /* called with netif_tx_lock
9402 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9403 * netif_wake_queue()
9405 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9407 struct bnx2x *bp = netdev_priv(dev);
9408 struct bnx2x_fastpath *fp;
9409 struct sw_tx_bd *tx_buf;
9410 struct eth_tx_bd *tx_bd;
9411 struct eth_tx_parse_bd *pbd = NULL;
9412 u16 pkt_prod, bd_prod;
9413 int nbd, fp_index = 0;
9414 dma_addr_t mapping;
9415 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9416 int vlan_off = (bp->e1hov ? 4 : 0);
9417 int i;
9418 u8 hlen = 0;
9420 #ifdef BNX2X_STOP_ON_ERROR
9421 if (unlikely(bp->panic))
9422 return NETDEV_TX_BUSY;
9423 #endif
9425 fp_index = (smp_processor_id() % bp->num_queues);
9426 fp = &bp->fp[fp_index];
9428 if (unlikely(bnx2x_tx_avail(fp) <
9429 (skb_shinfo(skb)->nr_frags + 3))) {
9430 bp->eth_stats.driver_xoff++;
9431 netif_stop_queue(dev);
9432 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9433 return NETDEV_TX_BUSY;
9436 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9437 " gso type %x xmit_type %x\n",
9438 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9439 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9441 /* First, check if we need to linearize the skb
9442 (due to FW restrictions) */
9443 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9444 /* Statistics of linearization */
9445 bp->lin_cnt++;
9446 if (skb_linearize(skb) != 0) {
9447 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9448 "silently dropping this SKB\n");
9449 dev_kfree_skb_any(skb);
9450 return NETDEV_TX_OK;
9455 Please read carefully. First we use one BD which we mark as start,
9456 then for TSO or xsum we have a parsing info BD,
9457 and only then we have the rest of the TSO BDs.
9458 (don't forget to mark the last one as last,
9459 and to unmap only AFTER you write to the BD ...)
9460 And above all, all PBD sizes are in words - NOT DWORDS!
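/* Sketch of the resulting BD chain (implied by the comment above, not an
 * authoritative diagram):
 *
 *   start BD (header, nbd) -> parse BD (PBD) -> data BD(s) -> last BD
 *
 * nbd below is nr_frags + 1, or + 2 when a PBD is used, and only the
 * final BD carries ETH_TX_BD_FLAGS_END_BD.
 */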
9463 pkt_prod = fp->tx_pkt_prod++;
9464 bd_prod = TX_BD(fp->tx_bd_prod);
9466 /* get a tx_buf and first BD */
9467 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9468 tx_bd = &fp->tx_desc_ring[bd_prod];
9470 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9471 tx_bd->general_data = (UNICAST_ADDRESS <<
9472 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9473 tx_bd->general_data |= 1; /* header nbd */
9475 /* remember the first BD of the packet */
9476 tx_buf->first_bd = fp->tx_bd_prod;
9479 DP(NETIF_MSG_TX_QUEUED,
9480 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9481 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9483 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9484 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9485 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9487 } else
9488 tx_bd->vlan = cpu_to_le16(pkt_prod);
9491 if (xmit_type) {
9492 /* turn on parsing and get a BD */
9493 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9494 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9496 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9499 if (xmit_type & XMIT_CSUM) {
9500 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9502 /* for now NS flag is not used in Linux */
9503 pbd->global_data = (hlen |
9504 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9505 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9507 pbd->ip_hlen = (skb_transport_header(skb) -
9508 skb_network_header(skb)) / 2;
9510 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9512 pbd->total_hlen = cpu_to_le16(hlen);
9513 hlen = hlen*2 - vlan_off;
9515 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9517 if (xmit_type & XMIT_CSUM_V4)
9518 tx_bd->bd_flags.as_bitfield |=
9519 ETH_TX_BD_FLAGS_IP_CSUM;
9521 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9523 if (xmit_type & XMIT_CSUM_TCP) {
9524 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9526 } else {
9527 s8 fix = SKB_CS_OFF(skb); /* signed! */
9529 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9530 pbd->cs_offset = fix / 2;
9532 DP(NETIF_MSG_TX_QUEUED,
9533 "hlen %d offset %d fix %d csum before fix %x\n",
9534 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9535 SKB_CS(skb));
9537 /* HW bug: fixup the CSUM */
9538 pbd->tcp_pseudo_csum =
9539 bnx2x_csum_fix(skb_transport_header(skb),
9540 SKB_CS(skb), fix);
9542 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9543 pbd->tcp_pseudo_csum);
9547 mapping = pci_map_single(bp->pdev, skb->data,
9548 skb_headlen(skb), PCI_DMA_TODEVICE);
9550 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9551 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9552 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9553 tx_bd->nbd = cpu_to_le16(nbd);
9554 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9556 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9557 " nbytes %d flags %x vlan %x\n",
9558 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9559 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9560 le16_to_cpu(tx_bd->vlan));
9562 if (xmit_type & XMIT_GSO) {
9564 DP(NETIF_MSG_TX_QUEUED,
9565 "TSO packet len %d hlen %d total len %d tso size %d\n",
9566 skb->len, hlen, skb_headlen(skb),
9567 skb_shinfo(skb)->gso_size);
9569 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9571 if (unlikely(skb_headlen(skb) > hlen))
9572 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9573 bd_prod, ++nbd);
9575 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9576 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9577 pbd->tcp_flags = pbd_tcp_flags(skb);
9579 if (xmit_type & XMIT_GSO_V4) {
9580 pbd->ip_id = swab16(ip_hdr(skb)->id);
9581 pbd->tcp_pseudo_csum =
9582 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9583 ip_hdr(skb)->daddr,
9584 0, IPPROTO_TCP, 0));
9586 } else
9587 pbd->tcp_pseudo_csum =
9588 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9589 &ipv6_hdr(skb)->daddr,
9590 0, IPPROTO_TCP, 0));
9592 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9595 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9596 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9598 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9599 tx_bd = &fp->tx_desc_ring[bd_prod];
9601 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9602 frag->size, PCI_DMA_TODEVICE);
9604 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9605 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9606 tx_bd->nbytes = cpu_to_le16(frag->size);
9607 tx_bd->vlan = cpu_to_le16(pkt_prod);
9608 tx_bd->bd_flags.as_bitfield = 0;
9610 DP(NETIF_MSG_TX_QUEUED,
9611 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9612 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9613 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9616 /* now at last mark the BD as the last BD */
9617 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9619 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9620 tx_bd, tx_bd->bd_flags.as_bitfield);
9622 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9624 /* now send a tx doorbell, counting the next BD
9625 * if the packet contains or ends with it
9627 if (TX_BD_POFF(bd_prod) < nbd)
9628 nbd++;
9630 if (pbd)
9631 DP(NETIF_MSG_TX_QUEUED,
9632 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9633 " tcp_flags %x xsum %x seq %u hlen %u\n",
9634 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9635 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9636 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
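/* TX_BD_POFF() is the producer's offset within its BD page; assuming the
 * last entry of each page is a link BD that NEXT_TX_IDX() skips, a packet
 * whose BDs wrap past the page end makes the FW fetch that link BD too,
 * which is why nbd was bumped above before the doorbell is rung.
 */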
9638 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9640 fp->hw_tx_prods->bds_prod =
9641 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9642 mb(); /* FW restriction: must not reorder writing nbd and packets */
9643 fp->hw_tx_prods->packets_prod =
9644 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9645 DOORBELL(bp, FP_IDX(fp), 0);
9647 mmiowb();
9649 fp->tx_bd_prod += nbd;
9650 dev->trans_start = jiffies;
9652 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9653 netif_stop_queue(dev);
9654 bp->eth_stats.driver_xoff++;
9655 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9656 netif_wake_queue(dev);
9658 fp->tx_pkt++;
9660 return NETDEV_TX_OK;
9663 /* called with rtnl_lock */
9664 static int bnx2x_open(struct net_device *dev)
9666 struct bnx2x *bp = netdev_priv(dev);
9668 bnx2x_set_power_state(bp, PCI_D0);
9670 return bnx2x_nic_load(bp, LOAD_OPEN);
9673 /* called with rtnl_lock */
9674 static int bnx2x_close(struct net_device *dev)
9676 struct bnx2x *bp = netdev_priv(dev);
9678 /* Unload the driver, release IRQs */
9679 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9680 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9681 if (!CHIP_REV_IS_SLOW(bp))
9682 bnx2x_set_power_state(bp, PCI_D3hot);
9684 return 0;
9687 /* called with netif_tx_lock from set_multicast */
9688 static void bnx2x_set_rx_mode(struct net_device *dev)
9690 struct bnx2x *bp = netdev_priv(dev);
9691 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9692 int port = BP_PORT(bp);
9694 if (bp->state != BNX2X_STATE_OPEN) {
9695 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9696 return;
9699 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9701 if (dev->flags & IFF_PROMISC)
9702 rx_mode = BNX2X_RX_MODE_PROMISC;
9704 else if ((dev->flags & IFF_ALLMULTI) ||
9705 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9706 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9708 else { /* some multicasts */
9709 if (CHIP_IS_E1(bp)) {
9710 int i, old;
9711 struct dev_mc_list *mclist;
9712 struct mac_configuration_cmd *config =
9713 bnx2x_sp(bp, mcast_config);
9715 for (i = 0, mclist = dev->mc_list;
9716 mclist && (i < dev->mc_count);
9717 i++, mclist = mclist->next) {
9719 config->config_table[i].
9720 cam_entry.msb_mac_addr =
9721 swab16(*(u16 *)&mclist->dmi_addr[0]);
9722 config->config_table[i].
9723 cam_entry.middle_mac_addr =
9724 swab16(*(u16 *)&mclist->dmi_addr[2]);
9725 config->config_table[i].
9726 cam_entry.lsb_mac_addr =
9727 swab16(*(u16 *)&mclist->dmi_addr[4]);
9728 config->config_table[i].cam_entry.flags =
9729 cpu_to_le16(port);
9730 config->config_table[i].
9731 target_table_entry.flags = 0;
9732 config->config_table[i].
9733 target_table_entry.client_id = 0;
9734 config->config_table[i].
9735 target_table_entry.vlan_id = 0;
9737 DP(NETIF_MSG_IFUP,
9738 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9739 config->config_table[i].
9740 cam_entry.msb_mac_addr,
9741 config->config_table[i].
9742 cam_entry.middle_mac_addr,
9743 config->config_table[i].
9744 cam_entry.lsb_mac_addr);
9746 old = config->hdr.length_6b;
9747 if (old > i) {
9748 for (; i < old; i++) {
9749 if (CAM_IS_INVALID(config->
9750 config_table[i])) {
9751 i--; /* already invalidated */
9752 break;
9753 }
9754 /* invalidate */
9755 CAM_INVALIDATE(config->
9756 config_table[i]);
9760 if (CHIP_REV_IS_SLOW(bp))
9761 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9762 else
9763 offset = BNX2X_MAX_MULTICAST*(1 + port);
9765 config->hdr.length_6b = i;
9766 config->hdr.offset = offset;
9767 config->hdr.client_id = BP_CL_ID(bp);
9768 config->hdr.reserved1 = 0;
9770 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9771 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9772 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9773 0);
9774 } else { /* E1H */
9775 /* Accept one or more multicasts */
9776 struct dev_mc_list *mclist;
9777 u32 mc_filter[MC_HASH_SIZE];
9778 u32 crc, bit, regidx;
9779 int i;
9781 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9783 for (i = 0, mclist = dev->mc_list;
9784 mclist && (i < dev->mc_count);
9785 i++, mclist = mclist->next) {
9787 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9788 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9789 mclist->dmi_addr[0], mclist->dmi_addr[1],
9790 mclist->dmi_addr[2], mclist->dmi_addr[3],
9791 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9793 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9794 bit = (crc >> 24) & 0xff;
9795 regidx = bit >> 5;
9796 bit &= 0x1f;
9797 mc_filter[regidx] |= (1 << bit);
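/* Example with illustrative numbers: a crc32c_le() result of 0x4a??????
 * gives bit = (crc >> 24) & 0xff = 0x4a = 74, so regidx = 74 >> 5 = 2 and
 * bit = 74 & 0x1f = 10 - bit 10 of MC_HASH register 2 gets set.
 */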
9800 for (i = 0; i < MC_HASH_SIZE; i++)
9801 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9802 mc_filter[i]);
9806 bp->rx_mode = rx_mode;
9807 bnx2x_set_storm_rx_mode(bp);
9810 /* called with rtnl_lock */
9811 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9813 struct sockaddr *addr = p;
9814 struct bnx2x *bp = netdev_priv(dev);
9816 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9817 return -EINVAL;
9819 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9820 if (netif_running(dev)) {
9821 if (CHIP_IS_E1(bp))
9822 bnx2x_set_mac_addr_e1(bp);
9823 else
9824 bnx2x_set_mac_addr_e1h(bp);
9827 return 0;
9830 /* called with rtnl_lock */
9831 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9833 struct mii_ioctl_data *data = if_mii(ifr);
9834 struct bnx2x *bp = netdev_priv(dev);
9836 switch (cmd) {
9837 case SIOCGMIIPHY:
9839 data->phy_id = bp->port.phy_addr;
9841 /* fallthrough */
9843 case SIOCGMIIREG: {
9844 u16 mii_regval;
9846 if (!netif_running(dev))
9847 return -EAGAIN;
9849 mutex_lock(&bp->port.phy_mutex);
9850 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9851 DEFAULT_PHY_DEV_ADDR,
9852 (data->reg_num & 0x1f), &mii_regval);
9853 data->val_out = mii_regval;
9854 mutex_unlock(&bp->port.phy_mutex);
9855 return err;
9858 case SIOCSMIIREG:
9859 if (!capable(CAP_NET_ADMIN))
9860 return -EPERM;
9862 if (!netif_running(dev))
9863 return -EAGAIN;
9865 mutex_lock(&bp->port.phy_mutex);
9866 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9867 DEFAULT_PHY_DEV_ADDR,
9868 (data->reg_num & 0x1f), data->val_in);
9869 mutex_unlock(&bp->port.phy_mutex);
9871 return err;
9874 default:
9875 /* do nothing */
9876 break;
9879 return -EOPNOTSUPP;
9880 /* called with rtnl_lock */
9881 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9883 struct bnx2x *bp = netdev_priv(dev);
9886 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9887 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9888 return -EINVAL;
9890 /* This does not race with packet allocation
9891 * because the actual alloc size is
9892 * only updated as part of load
9893 */
9894 dev->mtu = new_mtu;
9896 if (netif_running(dev)) {
9897 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9898 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9904 static void bnx2x_tx_timeout(struct net_device *dev)
9906 struct bnx2x *bp = netdev_priv(dev);
9908 #ifdef BNX2X_STOP_ON_ERROR
9909 if (!bp->panic)
9910 bnx2x_panic();
9911 #endif
9912 /* This allows the netif to be shutdown gracefully before resetting */
9913 schedule_work(&bp->reset_task);
9917 /* called with rtnl_lock */
9918 static void bnx2x_vlan_rx_register(struct net_device *dev,
9919 struct vlan_group *vlgrp)
9921 struct bnx2x *bp = netdev_priv(dev);
9923 bp->vlgrp = vlgrp;
9924 if (netif_running(dev))
9925 bnx2x_set_client_config(bp);
9930 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9931 static void poll_bnx2x(struct net_device *dev)
9933 struct bnx2x *bp = netdev_priv(dev);
9935 disable_irq(bp->pdev->irq);
9936 bnx2x_interrupt(bp->pdev->irq, dev);
9937 enable_irq(bp->pdev->irq);
9941 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9942 struct net_device *dev)
9944 struct bnx2x *bp;
9945 int rc;
9947 SET_NETDEV_DEV(dev, &pdev->dev);
9948 bp = netdev_priv(dev);
9949 bp->dev = dev;
9950 bp->pdev = pdev;
9951 bp->flags = 0;
9953 bp->func = PCI_FUNC(pdev->devfn);
9955 rc = pci_enable_device(pdev);
9956 if (rc) {
9957 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9958 goto err_out;
9961 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9962 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9963 " aborting\n");
9964 rc = -ENODEV;
9965 goto err_out_disable;
9968 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9969 printk(KERN_ERR PFX "Cannot find second PCI device"
9970 " base address, aborting\n");
9971 rc = -ENODEV;
9972 goto err_out_disable;
9975 if (atomic_read(&pdev->enable_cnt) == 1) {
9976 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9977 if (rc) {
9978 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9979 " aborting\n");
9980 goto err_out_disable;
9983 pci_set_master(pdev);
9984 pci_save_state(pdev);
9987 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9988 if (bp->pm_cap == 0) {
9989 printk(KERN_ERR PFX "Cannot find power management"
9990 " capability, aborting\n");
9991 rc = -EIO;
9992 goto err_out_release;
9995 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9996 if (bp->pcie_cap == 0) {
9997 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9998 " aborting\n");
9999 rc = -EIO;
10000 goto err_out_release;
10003 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10004 bp->flags |= USING_DAC_FLAG;
10005 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10006 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10007 " failed, aborting\n");
10008 rc = -EIO;
10009 goto err_out_release;
10012 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10013 printk(KERN_ERR PFX "System does not support DMA,"
10014 " aborting\n");
10015 rc = -EIO;
10016 goto err_out_release;
10019 dev->mem_start = pci_resource_start(pdev, 0);
10020 dev->base_addr = dev->mem_start;
10021 dev->mem_end = pci_resource_end(pdev, 0);
10023 dev->irq = pdev->irq;
10025 bp->regview = ioremap_nocache(dev->base_addr,
10026 pci_resource_len(pdev, 0));
10027 if (!bp->regview) {
10028 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10029 rc = -ENOMEM;
10030 goto err_out_release;
10033 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10034 min_t(u64, BNX2X_DB_SIZE,
10035 pci_resource_len(pdev, 2)));
10036 if (!bp->doorbells) {
10037 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10038 rc = -ENOMEM;
10039 goto err_out_unmap;
10042 bnx2x_set_power_state(bp, PCI_D0);
10044 /* clean indirect addresses */
10045 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10046 PCICFG_VENDOR_ID_OFFSET);
10047 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10048 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10049 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10050 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10052 dev->hard_start_xmit = bnx2x_start_xmit;
10053 dev->watchdog_timeo = TX_TIMEOUT;
10055 dev->ethtool_ops = &bnx2x_ethtool_ops;
10056 dev->open = bnx2x_open;
10057 dev->stop = bnx2x_close;
10058 dev->set_multicast_list = bnx2x_set_rx_mode;
10059 dev->set_mac_address = bnx2x_change_mac_addr;
10060 dev->do_ioctl = bnx2x_ioctl;
10061 dev->change_mtu = bnx2x_change_mtu;
10062 dev->tx_timeout = bnx2x_tx_timeout;
10064 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10066 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10067 dev->poll_controller = poll_bnx2x;
10069 dev->features |= NETIF_F_SG;
10070 dev->features |= NETIF_F_HW_CSUM;
10071 if (bp->flags & USING_DAC_FLAG)
10072 dev->features |= NETIF_F_HIGHDMA;
10074 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10076 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10077 dev->features |= NETIF_F_TSO6;
10079 return 0;
10081 err_out_unmap:
10082 if (bp->regview) {
10083 iounmap(bp->regview);
10084 bp->regview = NULL;
10086 if (bp->doorbells) {
10087 iounmap(bp->doorbells);
10088 bp->doorbells = NULL;
10091 err_out_release:
10092 if (atomic_read(&pdev->enable_cnt) == 1)
10093 pci_release_regions(pdev);
10095 err_out_disable:
10096 pci_disable_device(pdev);
10097 pci_set_drvdata(pdev, NULL);
10099 err_out:
10100 return rc;
10103 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10105 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10107 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10108 return val;
10111 /* return value of 1=2.5GHz 2=5GHz */
10112 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10114 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10116 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10117 return val;
10120 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10121 const struct pci_device_id *ent)
10123 static int version_printed;
10124 struct net_device *dev = NULL;
10125 struct bnx2x *bp;
10126 int rc;
10127 DECLARE_MAC_BUF(mac);
10129 if (version_printed++ == 0)
10130 printk(KERN_INFO "%s", version);
10132 /* dev zeroed in init_etherdev */
10133 dev = alloc_etherdev(sizeof(*bp));
10134 if (!dev) {
10135 printk(KERN_ERR PFX "Cannot allocate net device\n");
10136 return -ENOMEM;
10139 netif_carrier_off(dev);
10141 bp = netdev_priv(dev);
10142 bp->msglevel = debug;
10144 rc = bnx2x_init_dev(pdev, dev);
10145 if (rc < 0) {
10146 free_netdev(dev);
10147 return rc;
10150 rc = register_netdev(dev);
10151 if (rc) {
10152 dev_err(&pdev->dev, "Cannot register net device\n");
10153 goto init_one_exit;
10156 pci_set_drvdata(pdev, dev);
10158 rc = bnx2x_init_bp(bp);
10160 unregister_netdev(dev);
10161 goto init_one_exit;
10164 bp->common.name = board_info[ent->driver_data].name;
10165 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10166 " IRQ %d, ", dev->name, bp->common.name,
10167 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10168 bnx2x_get_pcie_width(bp),
10169 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10170 dev->base_addr, bp->pdev->irq);
10171 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10173 return 0;
10174 init_one_exit:
10175 if (bp->regview)
10176 iounmap(bp->regview);
10178 if (bp->doorbells)
10179 iounmap(bp->doorbells);
10183 if (atomic_read(&pdev->enable_cnt) == 1)
10184 pci_release_regions(pdev);
10186 pci_disable_device(pdev);
10187 pci_set_drvdata(pdev, NULL);
10189 return rc;
10192 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10194 struct net_device *dev = pci_get_drvdata(pdev);
10197 if (!dev) {
10198 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10199 return;
10201 bp = netdev_priv(dev);
10203 unregister_netdev(dev);
10205 if (bp->regview)
10206 iounmap(bp->regview);
10208 if (bp->doorbells)
10209 iounmap(bp->doorbells);
10211 free_netdev(dev);
10213 if (atomic_read(&pdev->enable_cnt) == 1)
10214 pci_release_regions(pdev);
10216 pci_disable_device(pdev);
10217 pci_set_drvdata(pdev, NULL);
10220 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10222 struct net_device *dev = pci_get_drvdata(pdev);
10225 if (!dev) {
10226 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10227 return -ENODEV;
10229 bp = netdev_priv(dev);
10231 rtnl_lock();
10233 pci_save_state(pdev);
10235 if (!netif_running(dev)) {
10236 rtnl_unlock();
10237 return 0;
10240 netif_device_detach(dev);
10242 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10244 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10246 rtnl_unlock();
10248 return 0;
10251 static int bnx2x_resume(struct pci_dev *pdev)
10253 struct net_device *dev = pci_get_drvdata(pdev);
10257 if (!dev) {
10258 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10259 return -ENODEV;
10261 bp = netdev_priv(dev);
10263 rtnl_lock();
10265 pci_restore_state(pdev);
10267 if (!netif_running(dev)) {
10268 rtnl_unlock();
10269 return 0;
10272 bnx2x_set_power_state(bp, PCI_D0);
10273 netif_device_attach(dev);
10275 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10277 rtnl_unlock();
10279 return rc;
10283 * bnx2x_io_error_detected - called when PCI error is detected
10284 * @pdev: Pointer to PCI device
10285 * @state: The current pci connection state
10287 * This function is called after a PCI bus error affecting
10288 * this device has been detected.
10290 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10291 pci_channel_state_t state)
10293 struct net_device *dev = pci_get_drvdata(pdev);
10294 struct bnx2x *bp = netdev_priv(dev);
10296 rtnl_lock();
10298 netif_device_detach(dev);
10300 if (netif_running(dev))
10301 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10303 pci_disable_device(pdev);
10305 rtnl_unlock();
10307 /* Request a slot reset */
10308 return PCI_ERS_RESULT_NEED_RESET;
10312 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10313 * @pdev: Pointer to PCI device
10315 * Restart the card from scratch, as if from a cold-boot.
10317 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10319 struct net_device *dev = pci_get_drvdata(pdev);
10320 struct bnx2x *bp = netdev_priv(dev);
10322 rtnl_lock();
10324 if (pci_enable_device(pdev)) {
10325 dev_err(&pdev->dev,
10326 "Cannot re-enable PCI device after reset\n");
10328 return PCI_ERS_RESULT_DISCONNECT;
10331 pci_set_master(pdev);
10332 pci_restore_state(pdev);
10334 if (netif_running(dev))
10335 bnx2x_set_power_state(bp, PCI_D0);
10337 rtnl_unlock();
10339 return PCI_ERS_RESULT_RECOVERED;
10343 * bnx2x_io_resume - called when traffic can start flowing again
10344 * @pdev: Pointer to PCI device
10346 * This callback is called when the error recovery driver tells us that
10347 * it's OK to resume normal operation.
10349 static void bnx2x_io_resume(struct pci_dev *pdev)
10351 struct net_device *dev = pci_get_drvdata(pdev);
10352 struct bnx2x *bp = netdev_priv(dev);
10354 rtnl_lock();
10356 if (netif_running(dev))
10357 bnx2x_nic_load(bp, LOAD_OPEN);
10359 netif_device_attach(dev);
10361 rtnl_unlock();
10364 static struct pci_error_handlers bnx2x_err_handler = {
10365 .error_detected = bnx2x_io_error_detected,
10366 .slot_reset = bnx2x_io_slot_reset,
10367 .resume = bnx2x_io_resume,
10370 static struct pci_driver bnx2x_pci_driver = {
10371 .name = DRV_MODULE_NAME,
10372 .id_table = bnx2x_pci_tbl,
10373 .probe = bnx2x_init_one,
10374 .remove = __devexit_p(bnx2x_remove_one),
10375 .suspend = bnx2x_suspend,
10376 .resume = bnx2x_resume,
10377 .err_handler = &bnx2x_err_handler,
10380 static int __init bnx2x_init(void)
10382 return pci_register_driver(&bnx2x_pci_driver);
10385 static void __exit bnx2x_cleanup(void)
10387 pci_unregister_driver(&bnx2x_pci_driver);
10390 module_init(bnx2x_init);
10391 module_exit(bnx2x_cleanup);