/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
	#define BCM_VLAN	1
#endif
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
61 #include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
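/* Note on the two helpers above: they tunnel GRC register accesses
 * through the PCI config-space window (PCICFG_GRC_ADDRESS selects the
 * target, PCICFG_GRC_DATA moves the data), which is why they work even
 * before the DMAE engine is ready; the trailing write parks the window
 * back at the vendor-ID offset.
 */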
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
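/* Both DMAE helpers above follow the same pattern: build a dmae_command,
 * post it via bnx2x_post_dmae(), then poll (with a bounded retry count)
 * until the engine writes DMAE_COMP_VAL into the slowpath completion
 * word. They take dmae_mutex, so they must not be called from atomic
 * context; before the engine is initialized they fall back to indirect
 * config-space accesses.
 */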
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
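/* Shutdown ordering above: bumping intr_sem makes new ISR invocations
 * bail out early, the HC write stops the hardware from raising further
 * interrupts, synchronize_irq() lets handlers already in flight drain,
 * and only then is the slowpath work item cancelled.
 */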
/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
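/* Ramrod (slowpath command) completions are delivered on the fastpath
 * RCQ rather than on a dedicated ring (see the comment ahead of
 * bnx2x_sp_post() below), so this handler is invoked from bnx2x_rx_int()
 * whenever a CQE turns out to be a slowpath message.
 */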
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
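/* The sge_mask tracks which SGE entries are still owned by the hardware:
 * bits are cleared as the firmware reports consumed pages, and the
 * producer is advanced only over fully-cleared 64-bit mask elements, so
 * rx_sge_prod never overtakes a page the chip may still write into.
 */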
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
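/* TPA is this chip's hardware LRO flavour (see the disable_tpa module
 * parameter above): tpa_start parks the first buffer of an aggregation
 * in a per-queue pool bin, and the aggregate is handed to the stack in
 * bnx2x_tpa_stop() below once the last CQE of the burst arrives.
 */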
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
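/* A note on the gso_size assignment above: stamping the aggregate with a
 * plausible segment size keeps it segmentable on the way out, which is
 * why the code flags it as needed for forwarding support - a box that
 * forwards (rather than terminates) the flow can re-slice the
 * super-packet.
 */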
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
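/* The three producers are mirrored into TSTORM internal memory as one
 * tstorm_eth_rx_producers structure; this is how the firmware learns
 * that new RX buffers, completion entries, and SGE pages are available.
 */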
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
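/* The MSI-X fastpath ISR only acks the status block with IGU_INT_DISABLE
 * and schedules NAPI; the actual TX/RX work - and the re-enabling ack -
 * is left to the NAPI poll routine, keeping the hard-IRQ path minimal.
 */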
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 cnt, lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}
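/* The MISC_REG_DRIVER_CONTROL_1 registers implement a set/clear style
 * lock that arbitrates resources shared by both ports (e.g. a dual-port
 * PHY's MDIO bus): writing the resource bit to the "+ 4" address
 * attempts the claim, reading it back confirms ownership, and writing
 * the bit to the base address releases the lock.
 */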
/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_phy_hw_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_phy_hw_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_phy_hw_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
       In the latter case the fairness algorithm should be deactivated.
       If not all min_rates are zero then those that are zeroes will
       be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (vn_min_rate == 0)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
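/* Rough feel for the numbers above, assuming port_rate is in Mbps:
 * r_param = port_rate/8 is then bytes per usec, so QM_ARB_BYTES/r_param
 * is the time to drain one arbitration quantum, and T_FAIR_COEF/port_rate
 * gives the fairness period (1000 usec at 10G, 10000 usec at 1G, per the
 * comment in the code).
 */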
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
				((double)m_rs_vn.
				 protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2202 /* This function is called upon link interrupt */
2203 static void bnx2x_link_attn(struct bnx2x *bp)
2207 /* Make sure that we are synced with the current statistics */
2208 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2210 bnx2x_phy_hw_lock(bp);
2211 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2212 bnx2x_phy_hw_unlock(bp);
2214 if (bp->link_vars.link_up) {
2216 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2217 struct host_port_stats *pstats;
2219 pstats = bnx2x_sp(bp, port_stats);
2220 /* reset old bmac stats */
2221 memset(&(pstats->mac_stx[0]), 0,
2222 sizeof(struct mac_stx));
2224 if ((bp->state == BNX2X_STATE_OPEN) ||
2225 (bp->state == BNX2X_STATE_DISABLED))
2226 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2229 /* indicate link status */
2230 bnx2x_link_report(bp);
2235 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;
2239 func = ((vn << 1) | BP_PORT(bp));
		/* Set the attention towards other drivers on the same port */
2243 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2244 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2248 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2249 struct cmng_struct_per_port m_cmng_port;
2251 int port = BP_PORT(bp);
2253 /* Init RATE SHAPING and FAIRNESS contexts */
2254 wsum = bnx2x_calc_vn_wsum(bp);
2255 bnx2x_init_port_minmax(bp, (int)wsum,
				       bp->link_vars.line_speed,
				       &m_cmng_port);
2259 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2260 bnx2x_init_vn_minmax(bp, 2*vn + port,
					      wsum, bp->link_vars.line_speed,
					      &m_cmng_port);
2266 static void bnx2x__link_status_update(struct bnx2x *bp)
	if (bp->state != BNX2X_STATE_OPEN)
		return;
2271 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2273 if (bp->link_vars.link_up)
2274 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2278 /* indicate link status */
2279 bnx2x_link_report(bp);
2282 static void bnx2x_pmf_update(struct bnx2x *bp)
2284 int port = BP_PORT(bp);
2288 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2290 /* enable nig attention */
2291 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2292 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2293 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2295 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2303 * General service functions
2306 /* the slow path queue is odd since completions arrive on the fastpath ring */
2307 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2308 u32 data_hi, u32 data_lo, int common)
2310 int func = BP_FUNC(bp);
2312 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2313 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2314 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2315 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2316 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2318 #ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif
2323 spin_lock_bh(&bp->spq_lock);
2325 if (!bp->spq_left) {
2326 BNX2X_ERR("BUG! SPQ ring full!\n");
2327 spin_unlock_bh(&bp->spq_lock);
	/* The CID needs the port number to be encoded in it */
2333 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2334 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2336 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2338 bp->spq_prod_bd->hdr.type |=
2339 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2341 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2342 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2346 if (bp->spq_prod_bd == bp->spq_last_bd) {
2347 bp->spq_prod_bd = bp->spq;
2348 bp->spq_prod_idx = 0;
2349 DP(NETIF_MSG_TIMER, "end of spq\n");
2356 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2359 spin_unlock_bh(&bp->spq_lock);
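/* Usage sketch (this is how the statistics code below drives it, with
 * data_hi/data_lo standing in for the two dwords of the ramrod data):
 * post a statistics-query ramrod with the COMMON flag cleared:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   data_hi, data_lo, 0);
 */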
2363 /* acquire split MCP access lock register */
2364 static int bnx2x_lock_alr(struct bnx2x *bp)
2371 for (j = 0; j < i*10; j++) {
2373 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2374 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;
2380 if (!(val & (1L << 31))) {
2381 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2388 /* Release split MCP access lock register */
2389 static void bnx2x_unlock_alr(struct bnx2x *bp)
2393 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2396 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2398 struct host_def_status_block *def_sb = bp->def_status_blk;
2401 barrier(); /* status block is written to by the chip */
2403 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2404 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2407 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2408 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2411 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2412 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2415 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2416 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2419 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2420 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2427 * slow path service functions
2430 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2432 int port = BP_PORT(bp);
2433 int func = BP_FUNC(bp);
2434 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2435 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2436 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2437 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2438 NIG_REG_MASK_INTERRUPT_PORT0;
2440 if (~bp->aeu_mask & (asserted & 0xff))
2441 BNX2X_ERR("IGU ERROR\n");
2442 if (bp->attn_state & asserted)
2443 BNX2X_ERR("IGU ERROR\n");
2445 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2446 bp->aeu_mask, asserted);
2447 bp->aeu_mask &= ~(asserted & 0xff);
2448 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2450 REG_WR(bp, aeu_addr, bp->aeu_mask);
2452 bp->attn_state |= asserted;
2454 if (asserted & ATTN_HARD_WIRED_MASK) {
2455 if (asserted & ATTN_NIG_FOR_FUNC) {
2457 /* save nig interrupt mask */
2458 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2459 REG_WR(bp, nig_int_mask_addr, 0);
2461 bnx2x_link_attn(bp);
2463 /* handle unicore attn? */
2465 if (asserted & ATTN_SW_TIMER_4_FUNC)
2466 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2468 if (asserted & GPIO_2_FUNC)
2469 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2471 if (asserted & GPIO_3_FUNC)
2472 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2474 if (asserted & GPIO_4_FUNC)
2475 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2478 if (asserted & ATTN_GENERAL_ATTN_1) {
2479 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2480 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2482 if (asserted & ATTN_GENERAL_ATTN_2) {
2483 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2484 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2486 if (asserted & ATTN_GENERAL_ATTN_3) {
2487 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2488 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2491 if (asserted & ATTN_GENERAL_ATTN_4) {
2492 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2493 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2495 if (asserted & ATTN_GENERAL_ATTN_5) {
2496 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2497 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2499 if (asserted & ATTN_GENERAL_ATTN_6) {
2500 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2501 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2505 } /* if hardwired */
2507 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2508 asserted, BAR_IGU_INTMEM + igu_addr);
2509 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2511 /* now set back the mask */
2512 if (asserted & ATTN_NIG_FOR_FUNC)
2513 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2516 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2518 int port = BP_PORT(bp);
2522 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2523 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2525 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2527 val = REG_RD(bp, reg_offset);
2528 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2529 REG_WR(bp, reg_offset, val);
2531 BNX2X_ERR("SPIO5 hw attention\n");
2533 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2534 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2535 /* Fan failure attention */
			/* The PHY reset is controlled by GPIO 1 */
2538 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2539 MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
2541 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2542 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2543 /* mark the failure */
2544 bp->link_params.ext_phy_config &=
2545 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2546 bp->link_params.ext_phy_config |=
2547 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
2550 external_phy_config,
2551 bp->link_params.ext_phy_config);
2552 /* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shut down the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
2565 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2567 val = REG_RD(bp, reg_offset);
2568 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2569 REG_WR(bp, reg_offset, val);
2571 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2572 (attn & HW_INTERRUT_ASSERT_SET_0));
2577 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2581 if (attn & BNX2X_DOORQ_ASSERT) {
2583 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2584 BNX2X_ERR("DB hw attention 0x%x\n", val);
2585 /* DORQ discard attention */
2587 BNX2X_ERR("FATAL error from DORQ\n");
2590 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2592 int port = BP_PORT(bp);
2595 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2596 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2598 val = REG_RD(bp, reg_offset);
2599 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2600 REG_WR(bp, reg_offset, val);
2602 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2603 (attn & HW_INTERRUT_ASSERT_SET_1));
2608 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2612 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2614 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2615 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2616 /* CFC error attention */
2618 BNX2X_ERR("FATAL error from CFC\n");
2621 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2623 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2624 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2625 /* RQ_USDMDP_FIFO_OVERFLOW */
2627 BNX2X_ERR("FATAL error from PXP\n");
2630 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2632 int port = BP_PORT(bp);
2635 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2636 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2638 val = REG_RD(bp, reg_offset);
2639 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2640 REG_WR(bp, reg_offset, val);
2642 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2643 (attn & HW_INTERRUT_ASSERT_SET_2));
2648 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2652 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2654 if (attn & BNX2X_PMF_LINK_ASSERT) {
2655 int func = BP_FUNC(bp);
2657 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2658 bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
			    DRV_STATUS_PMF)
2661 bnx2x_pmf_update(bp);
2663 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2665 BNX2X_ERR("MC assert!\n");
2666 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2667 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2668 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2669 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2672 } else if (attn & BNX2X_MCP_ASSERT) {
2674 BNX2X_ERR("MCP assert!\n");
2675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2679 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2682 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2683 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2684 if (attn & BNX2X_GRC_TIMEOUT) {
2685 val = CHIP_IS_E1H(bp) ?
2686 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2687 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2689 if (attn & BNX2X_GRC_RSV) {
2690 val = CHIP_IS_E1H(bp) ?
2691 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2692 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2694 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2698 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2700 struct attn_route attn;
2701 struct attn_route group_mask;
2702 int port = BP_PORT(bp);
2707 /* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_lock_alr(bp);
2711 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2712 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2713 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2714 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2715 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2716 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2718 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2719 if (deasserted & (1 << index)) {
2720 group_mask = bp->attn_group[index];
2722 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2723 index, group_mask.sig[0], group_mask.sig[1],
2724 group_mask.sig[2], group_mask.sig[3]);
2726 bnx2x_attn_int_deasserted3(bp,
2727 attn.sig[3] & group_mask.sig[3]);
2728 bnx2x_attn_int_deasserted1(bp,
2729 attn.sig[1] & group_mask.sig[1]);
2730 bnx2x_attn_int_deasserted2(bp,
2731 attn.sig[2] & group_mask.sig[2]);
2732 bnx2x_attn_int_deasserted0(bp,
2733 attn.sig[0] & group_mask.sig[0]);
2735 if ((attn.sig[0] & group_mask.sig[0] &
2736 HW_PRTY_ASSERT_SET_0) ||
2737 (attn.sig[1] & group_mask.sig[1] &
2738 HW_PRTY_ASSERT_SET_1) ||
2739 (attn.sig[2] & group_mask.sig[2] &
2740 HW_PRTY_ASSERT_SET_2))
2741 BNX2X_ERR("FATAL HW block parity attention\n");
2745 bnx2x_unlock_alr(bp);
2747 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2750 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2751 val, BAR_IGU_INTMEM + reg_addr); */
2752 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2754 if (bp->aeu_mask & (deasserted & 0xff))
2755 BNX2X_ERR("IGU BUG!\n");
2756 if (~bp->attn_state & deasserted)
2757 BNX2X_ERR("IGU BUG!\n");
2759 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2760 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2762 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2763 bp->aeu_mask |= (deasserted & 0xff);
2765 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2766 REG_WR(bp, reg_addr, bp->aeu_mask);
2768 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2769 bp->attn_state &= ~deasserted;
2770 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2773 static void bnx2x_attn_int(struct bnx2x *bp)
2775 /* read local copy of bits */
2776 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2777 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2778 u32 attn_state = bp->attn_state;
2780 /* look for changed bits */
2781 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2782 u32 deasserted = ~attn_bits & attn_ack & attn_state;
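	/* Worked example (hypothetical values): attn_bits = 0x5,
	 * attn_ack = 0x4, attn_state = 0x4 gives
	 * asserted   = 0x5 & ~0x4 & ~0x4 = 0x1 (bit 0 newly raised) and
	 * deasserted = ~0x5 & 0x4 &  0x4 = 0x0 (nothing was cleared).
	 */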
2785 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2786 attn_bits, attn_ack, asserted, deasserted);
2788 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2789 BNX2X_ERR("BAD attention state\n");
2791 /* handle bits that were raised */
2793 bnx2x_attn_int_asserted(bp, asserted);
2796 bnx2x_attn_int_deasserted(bp, deasserted);
2799 static void bnx2x_sp_task(struct work_struct *work)
2801 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2805 /* Return here if interrupt is disabled */
2806 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2807 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2811 status = bnx2x_update_dsb_idx(bp);
2812 /* if (status == 0) */
2813 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2815 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2821 /* CStorm events: query_stats, port delete ramrod */
2823 bp->stats_pending = 0;
2825 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2827 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2829 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2831 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2833 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2838 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2840 struct net_device *dev = dev_instance;
2841 struct bnx2x *bp = netdev_priv(dev);
2843 /* Return here if interrupt is disabled */
2844 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2845 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2849 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2851 #ifdef BNX2X_STOP_ON_ERROR
2852 if (unlikely(bp->panic))
2856 schedule_work(&bp->sp_task);
2861 /* end of slow path */
2865 /****************************************************************************
2867 ****************************************************************************/
2869 /* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
2876 /* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
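/* Worked example for DIFF_64 (illustrative values): minuend
 * 0x00000001:00000000 minus subtrahend 0x00000000:00000001 takes the
 * 'loan' path: d_hi = (1 - 0) - 1 = 0 after the loan and
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, i.e. 2^32 - 1.
 */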
2904 #define UPDATE_STAT64(s, t) \
2906 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2907 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2908 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2909 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2910 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2911 pstats->mac_stx[1].t##_lo, diff.lo); \
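/* Reading note: mac_stx[0] caches the last raw MAC counter snapshot while
 * mac_stx[1] accumulates the deltas, so zeroing mac_stx[0] (as
 * bnx2x_link_attn() does when a BMAC comes up) restarts the diff without
 * discarding the running totals.
 */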
2914 #define UPDATE_STAT64_NIG(s, t) \
2916 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2917 diff.lo, new->s##_lo, old->s##_lo); \
2918 ADD_64(estats->t##_hi, diff.hi, \
2919 estats->t##_lo, diff.lo); \
2922 /* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)
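/* e.g. (illustrative): s = 0x00000000:ffffffff with a = 1 wraps s_lo to 0,
 * so (s_lo < a) holds and the carry bumps s_hi, giving 0x00000001:00000000.
 */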
2929 #define UPDATE_EXTEND_STAT(s) \
2931 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2932 pstats->mac_stx[1].s##_lo, \
2936 #define UPDATE_EXTEND_TSTAT(s, t) \
2938 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2939 old_tclient->s = le32_to_cpu(tclient->s); \
2940 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2943 #define UPDATE_EXTEND_XSTAT(s, t) \
2945 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2946 old_xclient->s = le32_to_cpu(xclient->s); \
2947 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2951 * General service functions
2954 static inline long bnx2x_hilo(u32 *hiref)
2956 u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
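/* e.g. (illustrative): hiref[0] = 0x1 and hiref[1] = 0x2 combine to
 * 0x100000002 on a 64-bit build; a 32-bit build keeps only the low
 * dword and returns 0x2.
 */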
2967 * Init service functions
2970 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2972 if (!bp->stats_pending) {
2973 struct eth_query_ramrod_data ramrod_data = {0};
2976 ramrod_data.drv_counter = bp->stats_counter++;
2977 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2978 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2980 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2981 ((u32 *)&ramrod_data)[1],
2982 ((u32 *)&ramrod_data)[0], 0);
		/* stats ramrod has its own slot on the spq */
2986 bp->stats_pending = 1;
2991 static void bnx2x_stats_init(struct bnx2x *bp)
2993 int port = BP_PORT(bp);
2995 bp->executer_idx = 0;
2996 bp->stats_counter = 0;
3000 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3002 bp->port.port_stx = 0;
3003 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3005 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3006 bp->port.old_nig_stats.brb_discard =
3007 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3008 bp->port.old_nig_stats.brb_truncate =
3009 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3010 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3011 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3012 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3013 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3015 /* function stats */
3016 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3017 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3018 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3019 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3021 bp->stats_state = STATS_STATE_DISABLED;
3022 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3023 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3026 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3028 struct dmae_command *dmae = &bp->stats_dmae;
3029 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3031 *stats_comp = DMAE_COMP_VAL;
3034 if (bp->executer_idx) {
3035 int loader_idx = PMF_DMAE_C(bp);
3037 memset(dmae, 0, sizeof(struct dmae_command));
3039 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3040 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3041 DMAE_CMD_DST_RESET |
3043 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3045 DMAE_CMD_ENDIANITY_DW_SWAP |
3047 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3049 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3050 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3051 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3052 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3053 sizeof(struct dmae_command) *
3054 (loader_idx + 1)) >> 2;
3055 dmae->dst_addr_hi = 0;
3056 dmae->len = sizeof(struct dmae_command) >> 2;
3059 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3060 dmae->comp_addr_hi = 0;
3064 bnx2x_post_dmae(bp, dmae, loader_idx);
3066 } else if (bp->func_stx) {
3068 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3072 static int bnx2x_stats_comp(struct bnx2x *bp)
3074 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3078 while (*stats_comp != DMAE_COMP_VAL) {
3081 BNX2X_ERR("timeout waiting for stats finished\n");
3090 * Statistics service functions
3093 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3095 struct dmae_command *dmae;
3097 int loader_idx = PMF_DMAE_C(bp);
3098 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3101 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3102 BNX2X_ERR("BUG!\n");
3106 bp->executer_idx = 0;
3108 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3110 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3112 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3114 DMAE_CMD_ENDIANITY_DW_SWAP |
3116 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3117 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3119 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3120 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3121 dmae->src_addr_lo = bp->port.port_stx >> 2;
3122 dmae->src_addr_hi = 0;
3123 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3124 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3125 dmae->len = DMAE_LEN32_RD_MAX;
3126 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3127 dmae->comp_addr_hi = 0;
3130 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3131 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3132 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3133 dmae->src_addr_hi = 0;
3134 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3135 DMAE_LEN32_RD_MAX * 4);
3136 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3137 DMAE_LEN32_RD_MAX * 4);
3138 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3139 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3140 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3141 dmae->comp_val = DMAE_COMP_VAL;
3144 bnx2x_hw_stats_post(bp);
3145 bnx2x_stats_comp(bp);
3148 static void bnx2x_port_stats_init(struct bnx2x *bp)
3150 struct dmae_command *dmae;
3151 int port = BP_PORT(bp);
3152 int vn = BP_E1HVN(bp);
3154 int loader_idx = PMF_DMAE_C(bp);
3156 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3159 if (!bp->link_vars.link_up || !bp->port.pmf) {
3160 BNX2X_ERR("BUG!\n");
3164 bp->executer_idx = 0;
3167 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3168 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3169 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3171 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3173 DMAE_CMD_ENDIANITY_DW_SWAP |
3175 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3176 (vn << DMAE_CMD_E1HVN_SHIFT));
3178 if (bp->port.port_stx) {
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = opcode;
3182 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3183 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3184 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3185 dmae->dst_addr_hi = 0;
3186 dmae->len = sizeof(struct host_port_stats) >> 2;
3187 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188 dmae->comp_addr_hi = 0;
3194 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3195 dmae->opcode = opcode;
3196 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3197 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3198 dmae->dst_addr_lo = bp->func_stx >> 2;
3199 dmae->dst_addr_hi = 0;
3200 dmae->len = sizeof(struct host_func_stats) >> 2;
3201 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3202 dmae->comp_addr_hi = 0;
3207 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3208 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3209 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3211 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3213 DMAE_CMD_ENDIANITY_DW_SWAP |
3215 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3216 (vn << DMAE_CMD_E1HVN_SHIFT));
3218 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3220 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3221 NIG_REG_INGRESS_BMAC0_MEM);
3223 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3224 BIGMAC_REGISTER_TX_STAT_GTBYT */
3225 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3226 dmae->opcode = opcode;
3227 dmae->src_addr_lo = (mac_addr +
3228 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3229 dmae->src_addr_hi = 0;
3230 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3231 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3232 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3233 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3234 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3235 dmae->comp_addr_hi = 0;
3238 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3239 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3240 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3241 dmae->opcode = opcode;
3242 dmae->src_addr_lo = (mac_addr +
3243 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3244 dmae->src_addr_hi = 0;
3245 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3246 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3247 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3248 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3249 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3250 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3251 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252 dmae->comp_addr_hi = 0;
3255 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3257 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3259 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3260 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3261 dmae->opcode = opcode;
3262 dmae->src_addr_lo = (mac_addr +
3263 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3264 dmae->src_addr_hi = 0;
3265 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3266 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3267 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3268 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3269 dmae->comp_addr_hi = 0;
3272 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = opcode;
3275 dmae->src_addr_lo = (mac_addr +
3276 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3277 dmae->src_addr_hi = 0;
3278 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3279 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3280 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3281 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3283 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3284 dmae->comp_addr_hi = 0;
3287 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3288 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3289 dmae->opcode = opcode;
3290 dmae->src_addr_lo = (mac_addr +
3291 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3292 dmae->src_addr_hi = 0;
3293 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3294 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3295 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3296 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3297 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3298 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3299 dmae->comp_addr_hi = 0;
3304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305 dmae->opcode = opcode;
3306 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3307 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3308 dmae->src_addr_hi = 0;
3309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3310 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3311 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3312 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313 dmae->comp_addr_hi = 0;
3316 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3317 dmae->opcode = opcode;
3318 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3319 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3320 dmae->src_addr_hi = 0;
3321 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3322 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3323 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3324 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3325 dmae->len = (2*sizeof(u32)) >> 2;
3326 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3327 dmae->comp_addr_hi = 0;
3330 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3331 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3332 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3333 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3335 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3337 DMAE_CMD_ENDIANITY_DW_SWAP |
3339 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3340 (vn << DMAE_CMD_E1HVN_SHIFT));
3341 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3342 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3345 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3346 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3347 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3348 dmae->len = (2*sizeof(u32)) >> 2;
3349 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3350 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3351 dmae->comp_val = DMAE_COMP_VAL;
3356 static void bnx2x_func_stats_init(struct bnx2x *bp)
3358 struct dmae_command *dmae = &bp->stats_dmae;
3359 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3362 if (!bp->func_stx) {
3363 BNX2X_ERR("BUG!\n");
3367 bp->executer_idx = 0;
3368 memset(dmae, 0, sizeof(struct dmae_command));
3370 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3371 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3372 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3374 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3376 DMAE_CMD_ENDIANITY_DW_SWAP |
3378 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3379 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3380 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3381 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3382 dmae->dst_addr_lo = bp->func_stx >> 2;
3383 dmae->dst_addr_hi = 0;
3384 dmae->len = sizeof(struct host_func_stats) >> 2;
3385 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3386 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3387 dmae->comp_val = DMAE_COMP_VAL;
3392 static void bnx2x_stats_start(struct bnx2x *bp)
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);
3397 else if (bp->func_stx)
3398 bnx2x_func_stats_init(bp);
3400 bnx2x_hw_stats_post(bp);
3401 bnx2x_storm_stats_post(bp);
3404 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3406 bnx2x_stats_comp(bp);
3407 bnx2x_stats_pmf_update(bp);
3408 bnx2x_stats_start(bp);
3411 static void bnx2x_stats_restart(struct bnx2x *bp)
3413 bnx2x_stats_comp(bp);
3414 bnx2x_stats_start(bp);
3417 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3419 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3420 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3421 struct regpair diff;
3423 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3424 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3425 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3426 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3427 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3428 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3429 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3430 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3431 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3432 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3433 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3434 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3435 UPDATE_STAT64(tx_stat_gt127,
3436 tx_stat_etherstatspkts65octetsto127octets);
3437 UPDATE_STAT64(tx_stat_gt255,
3438 tx_stat_etherstatspkts128octetsto255octets);
3439 UPDATE_STAT64(tx_stat_gt511,
3440 tx_stat_etherstatspkts256octetsto511octets);
3441 UPDATE_STAT64(tx_stat_gt1023,
3442 tx_stat_etherstatspkts512octetsto1023octets);
3443 UPDATE_STAT64(tx_stat_gt1518,
3444 tx_stat_etherstatspkts1024octetsto1522octets);
3445 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3446 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3447 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3448 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3449 UPDATE_STAT64(tx_stat_gterr,
3450 tx_stat_dot3statsinternalmactransmiterrors);
3451 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3454 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3456 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3457 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3459 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3460 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3461 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3462 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3463 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3464 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3465 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3466 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3467 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3468 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3469 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3470 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3471 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3472 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3473 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3474 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3475 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3476 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3477 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3478 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3479 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3480 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3481 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3482 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3483 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3484 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3485 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3486 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3487 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3488 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3489 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3492 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3494 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3495 struct nig_stats *old = &(bp->port.old_nig_stats);
3496 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3497 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3498 struct regpair diff;
3500 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3501 bnx2x_bmac_stats_update(bp);
3503 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3504 bnx2x_emac_stats_update(bp);
3506 else { /* unreached */
3507 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3511 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3512 new->brb_discard - old->brb_discard);
3513 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3514 new->brb_truncate - old->brb_truncate);
3516 UPDATE_STAT64_NIG(egress_mac_pkt0,
3517 etherstatspkts1024octetsto1522octets);
3518 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3520 memcpy(old, new, sizeof(struct nig_stats));
3522 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3523 sizeof(struct mac_stx));
3524 estats->brb_drop_hi = pstats->brb_drop_hi;
3525 estats->brb_drop_lo = pstats->brb_drop_lo;
3527 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3532 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3534 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3535 int cl_id = BP_CL_ID(bp);
3536 struct tstorm_per_port_stats *tport =
3537 &stats->tstorm_common.port_statistics;
3538 struct tstorm_per_client_stats *tclient =
3539 &stats->tstorm_common.client_statistics[cl_id];
3540 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3541 struct xstorm_per_client_stats *xclient =
3542 &stats->xstorm_common.client_statistics[cl_id];
3543 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3544 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3545 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548 /* are storm stats valid? */
3549 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3550 bp->stats_counter) {
3551 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3552 " tstorm counter (%d) != stats_counter (%d)\n",
3553 tclient->stats_counter, bp->stats_counter);
3556 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3557 bp->stats_counter) {
3558 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3559 " xstorm counter (%d) != stats_counter (%d)\n",
3560 xclient->stats_counter, bp->stats_counter);
3564 fstats->total_bytes_received_hi =
3565 fstats->valid_bytes_received_hi =
3566 le32_to_cpu(tclient->total_rcv_bytes.hi);
3567 fstats->total_bytes_received_lo =
3568 fstats->valid_bytes_received_lo =
3569 le32_to_cpu(tclient->total_rcv_bytes.lo);
3571 estats->error_bytes_received_hi =
3572 le32_to_cpu(tclient->rcv_error_bytes.hi);
3573 estats->error_bytes_received_lo =
3574 le32_to_cpu(tclient->rcv_error_bytes.lo);
3575 ADD_64(estats->error_bytes_received_hi,
3576 estats->rx_stat_ifhcinbadoctets_hi,
3577 estats->error_bytes_received_lo,
3578 estats->rx_stat_ifhcinbadoctets_lo);
3580 ADD_64(fstats->total_bytes_received_hi,
3581 estats->error_bytes_received_hi,
3582 fstats->total_bytes_received_lo,
3583 estats->error_bytes_received_lo);
3585 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3586 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3587 total_multicast_packets_received);
3588 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3589 total_broadcast_packets_received);
3591 fstats->total_bytes_transmitted_hi =
3592 le32_to_cpu(xclient->total_sent_bytes.hi);
3593 fstats->total_bytes_transmitted_lo =
3594 le32_to_cpu(xclient->total_sent_bytes.lo);
3596 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3597 total_unicast_packets_transmitted);
3598 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3599 total_multicast_packets_transmitted);
3600 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3601 total_broadcast_packets_transmitted);
3603 memcpy(estats, &(fstats->total_bytes_received_hi),
3604 sizeof(struct host_func_stats) - 2*sizeof(u32));
3606 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3607 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3608 estats->brb_truncate_discard =
3609 le32_to_cpu(tport->brb_truncate_discard);
3610 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3612 old_tclient->rcv_unicast_bytes.hi =
3613 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3614 old_tclient->rcv_unicast_bytes.lo =
3615 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3616 old_tclient->rcv_broadcast_bytes.hi =
3617 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3618 old_tclient->rcv_broadcast_bytes.lo =
3619 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3620 old_tclient->rcv_multicast_bytes.hi =
3621 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3622 old_tclient->rcv_multicast_bytes.lo =
3623 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3624 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3626 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3627 old_tclient->packets_too_big_discard =
3628 le32_to_cpu(tclient->packets_too_big_discard);
3629 estats->no_buff_discard =
3630 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3631 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3633 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3634 old_xclient->unicast_bytes_sent.hi =
3635 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3636 old_xclient->unicast_bytes_sent.lo =
3637 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3638 old_xclient->multicast_bytes_sent.hi =
3639 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3640 old_xclient->multicast_bytes_sent.lo =
3641 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3642 old_xclient->broadcast_bytes_sent.hi =
3643 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3644 old_xclient->broadcast_bytes_sent.lo =
3645 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3647 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3652 static void bnx2x_net_stats_update(struct bnx2x *bp)
3654 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3655 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3656 struct net_device_stats *nstats = &bp->dev->stats;
3658 nstats->rx_packets =
3659 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3660 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3661 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3663 nstats->tx_packets =
3664 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3665 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3666 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3668 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3670 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3672 nstats->rx_dropped = old_tclient->checksum_discard +
3673 estats->mac_discard;
3674 nstats->tx_dropped = 0;
3677 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3679 nstats->collisions =
3680 estats->tx_stat_dot3statssinglecollisionframes_lo +
3681 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3682 estats->tx_stat_dot3statslatecollisions_lo +
3683 estats->tx_stat_dot3statsexcessivecollisions_lo;
3685 estats->jabber_packets_received =
3686 old_tclient->packets_too_big_discard +
3687 estats->rx_stat_dot3statsframestoolong_lo;
3689 nstats->rx_length_errors =
3690 estats->rx_stat_etherstatsundersizepkts_lo +
3691 estats->jabber_packets_received;
3692 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3693 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3694 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3695 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3696 nstats->rx_missed_errors = estats->xxoverflow_discard;
3698 nstats->rx_errors = nstats->rx_length_errors +
3699 nstats->rx_over_errors +
3700 nstats->rx_crc_errors +
3701 nstats->rx_frame_errors +
3702 nstats->rx_fifo_errors +
3703 nstats->rx_missed_errors;
3705 nstats->tx_aborted_errors =
3706 estats->tx_stat_dot3statslatecollisions_lo +
3707 estats->tx_stat_dot3statsexcessivecollisions_lo;
3708 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3709 nstats->tx_fifo_errors = 0;
3710 nstats->tx_heartbeat_errors = 0;
3711 nstats->tx_window_errors = 0;
3713 nstats->tx_errors = nstats->tx_aborted_errors +
3714 nstats->tx_carrier_errors;
3717 static void bnx2x_stats_update(struct bnx2x *bp)
3719 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	if (*stats_comp != DMAE_COMP_VAL)
		return;
3726 update = (bnx2x_hw_stats_update(bp) == 0);
3728 update |= (bnx2x_storm_stats_update(bp) == 0);
3731 bnx2x_net_stats_update(bp);
3734 if (bp->stats_pending) {
3735 bp->stats_pending++;
3736 if (bp->stats_pending == 3) {
3737 BNX2X_ERR("stats not updated for 3 times\n");
3744 if (bp->msglevel & NETIF_MSG_TIMER) {
3745 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3746 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3747 struct net_device_stats *nstats = &bp->dev->stats;
3750 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3751 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3753 bnx2x_tx_avail(bp->fp),
3754 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3755 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3757 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3758 bp->fp->rx_comp_cons),
3759 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3760 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3762 estats->driver_xoff, estats->brb_drop_lo);
3763 printk(KERN_DEBUG "tstats: checksum_discard %u "
3764 "packets_too_big_discard %u no_buff_discard %u "
3765 "mac_discard %u mac_filter_discard %u "
3766 "xxovrflow_discard %u brb_truncate_discard %u "
3767 "ttl0_discard %u\n",
3768 old_tclient->checksum_discard,
3769 old_tclient->packets_too_big_discard,
3770 old_tclient->no_buff_discard, estats->mac_discard,
3771 estats->mac_filter_discard, estats->xxoverflow_discard,
3772 estats->brb_truncate_discard,
3773 old_tclient->ttl0_discard);
3775 for_each_queue(bp, i) {
3776 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3777 bnx2x_fp(bp, i, tx_pkt),
3778 bnx2x_fp(bp, i, rx_pkt),
3779 bnx2x_fp(bp, i, rx_calls));
3783 bnx2x_hw_stats_post(bp);
3784 bnx2x_storm_stats_post(bp);
3787 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3789 struct dmae_command *dmae;
3791 int loader_idx = PMF_DMAE_C(bp);
3792 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3794 bp->executer_idx = 0;
3796 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3798 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3800 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3802 DMAE_CMD_ENDIANITY_DW_SWAP |
3804 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3805 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3807 if (bp->port.port_stx) {
3809 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3811 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3813 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3814 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3815 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3816 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3817 dmae->dst_addr_hi = 0;
3818 dmae->len = sizeof(struct host_port_stats) >> 2;
3820 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3821 dmae->comp_addr_hi = 0;
3824 dmae->comp_addr_lo =
3825 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3826 dmae->comp_addr_hi =
3827 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3828 dmae->comp_val = DMAE_COMP_VAL;
3836 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3837 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3839 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3840 dmae->dst_addr_lo = bp->func_stx >> 2;
3841 dmae->dst_addr_hi = 0;
3842 dmae->len = sizeof(struct host_func_stats) >> 2;
3843 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3844 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3845 dmae->comp_val = DMAE_COMP_VAL;
3851 static void bnx2x_stats_stop(struct bnx2x *bp)
3855 bnx2x_stats_comp(bp);
3858 update = (bnx2x_hw_stats_update(bp) == 0);
3860 update |= (bnx2x_storm_stats_update(bp) == 0);
3863 bnx2x_net_stats_update(bp);
3866 bnx2x_port_stats_stop(bp);
3868 bnx2x_hw_stats_post(bp);
3869 bnx2x_stats_comp(bp);
3873 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3877 static const struct {
3878 void (*action)(struct bnx2x *bp);
3879 enum bnx2x_stats_state next_state;
3880 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3883 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3884 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3885 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3886 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3889 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3890 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3891 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3892 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
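/* Example walk through the table (illustrative): in STATS_STATE_ENABLED a
 * STOP event runs bnx2x_stats_stop() and moves the machine to
 * STATS_STATE_DISABLED; a subsequent LINK_UP runs bnx2x_stats_start() and
 * moves it back to STATS_STATE_ENABLED.
 */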
3896 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3898 enum bnx2x_stats_state state = bp->stats_state;
3900 bnx2x_stats_stm[state][event].action(bp);
3901 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3903 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3904 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3905 state, event, bp->stats_state);
3908 static void bnx2x_timer(unsigned long data)
3910 struct bnx2x *bp = (struct bnx2x *) data;
	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;
3919 struct bnx2x_fastpath *fp = &bp->fp[0];
3922 bnx2x_tx_int(fp, 1000);
3923 rc = bnx2x_rx_int(fp, 1000);
3926 if (!BP_NOMCP(bp)) {
3927 int func = BP_FUNC(bp);
3931 ++bp->fw_drv_pulse_wr_seq;
3932 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3933 /* TBD - add SYSTEM_TIME */
3934 drv_pulse = bp->fw_drv_pulse_wr_seq;
3935 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3937 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3938 MCP_PULSE_SEQ_MASK);
3939 /* The delta between driver pulse and mcp response
3940 * should be 1 (before mcp response) or 0 (after mcp response)
3942 if ((drv_pulse != mcp_pulse) &&
3943 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3944 /* someone lost a heartbeat... */
3945 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3946 drv_pulse, mcp_pulse);
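		/* e.g. (illustrative): drv_pulse = 0x11 is healthy against
		 * mcp_pulse = 0x11 (MCP already answered) or mcp_pulse = 0x10
		 * (answer still pending); any other value means a lost
		 * heartbeat and triggers the error above.
		 */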
3950 if ((bp->state == BNX2X_STATE_OPEN) ||
3951 (bp->state == BNX2X_STATE_DISABLED))
3952 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
3958 /* end of Statistics */
3963 * nic init service functions
3966 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3968 int port = BP_PORT(bp);
3970 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3971 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3972 sizeof(struct ustorm_def_status_block)/4);
3973 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3974 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3975 sizeof(struct cstorm_def_status_block)/4);
3978 static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
3979 struct host_status_block *sb, dma_addr_t mapping)
3981 int port = BP_PORT(bp);
3982 int func = BP_FUNC(bp);
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
3989 sb->u_status_block.status_block_id = sb_id;
3991 REG_WR(bp, BAR_USTRORM_INTMEM +
3992 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
3993 REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
3996 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
3997 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
3999 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4000 REG_WR16(bp, BAR_USTRORM_INTMEM +
4001 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
4006 sb->c_status_block.status_block_id = sb_id;
4008 REG_WR(bp, BAR_CSTRORM_INTMEM +
4009 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4010 REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
4013 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4014 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4016 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4017 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4018 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4020 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4023 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4025 int func = BP_FUNC(bp);
4027 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4028 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4029 sizeof(struct ustorm_def_status_block)/4);
4030 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4031 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4032 sizeof(struct cstorm_def_status_block)/4);
4033 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4034 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4035 sizeof(struct xstorm_def_status_block)/4);
4036 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4037 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4038 sizeof(struct tstorm_def_status_block)/4);
4041 static void bnx2x_init_def_sb(struct bnx2x *bp,
4042 struct host_def_status_block *def_sb,
4043 dma_addr_t mapping, int sb_id)
4045 int port = BP_PORT(bp);
4046 int func = BP_FUNC(bp);
4047 int index, val, reg_offset;
4051 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4052 atten_status_block);
4053 def_sb->atten_status_block.status_block_id = sb_id;
4055 bp->def_att_idx = 0;
4058 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4059 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4061 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4062 bp->attn_group[index].sig[0] = REG_RD(bp,
4063 reg_offset + 0x10*index);
4064 bp->attn_group[index].sig[1] = REG_RD(bp,
4065 reg_offset + 0x4 + 0x10*index);
4066 bp->attn_group[index].sig[2] = REG_RD(bp,
4067 reg_offset + 0x8 + 0x10*index);
4068 bp->attn_group[index].sig[3] = REG_RD(bp,
4069 reg_offset + 0xc + 0x10*index);
4072 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4073 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4075 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4076 HC_REG_ATTN_MSG0_ADDR_L);
4078 REG_WR(bp, reg_offset, U64_LO(section));
4079 REG_WR(bp, reg_offset + 4, U64_HI(section));
4081 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4083 val = REG_RD(bp, reg_offset);
4085 REG_WR(bp, reg_offset, val);
4088 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4089 u_def_status_block);
4090 def_sb->u_def_status_block.status_block_id = sb_id;
4094 REG_WR(bp, BAR_USTRORM_INTMEM +
4095 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4096 REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
4099 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4100 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4101 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4104 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4105 REG_WR16(bp, BAR_USTRORM_INTMEM +
4106 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4109 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4110 c_def_status_block);
4111 def_sb->c_def_status_block.status_block_id = sb_id;
4115 REG_WR(bp, BAR_CSTRORM_INTMEM +
4116 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4117 REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
4120 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4121 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4122 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4125 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4126 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4127 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4130 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4131 t_def_status_block);
4132 def_sb->t_def_status_block.status_block_id = sb_id;
4136 REG_WR(bp, BAR_TSTRORM_INTMEM +
4137 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4138 REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
4141 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4142 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4143 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4146 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4147 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4148 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4151 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152 x_def_status_block);
4153 def_sb->x_def_status_block.status_block_id = sb_id;
4157 REG_WR(bp, BAR_XSTRORM_INTMEM +
4158 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4159 REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
4162 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4163 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4164 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4167 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4168 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4169 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4171 bp->stats_pending = 0;
4172 bp->set_mac_pending = 0;
4174 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
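
/*
 * Note on the per-storm programming above (an illustrative sketch, not
 * part of the original flow): all four storms share one DMA-coherent
 * host_def_status_block, and each storm is handed the bus address of its
 * own sub-section, derived with offsetof() and split into two 32-bit
 * register writes, e.g. for the CSTORM section:
 */
#if 0
	u64 sect = (u64)mapping +	/* 'mapping' is the block bus address */
		   offsetof(struct host_def_status_block, c_def_status_block);
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(sect));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func) + 4, U64_HI(sect));
#endif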
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks ? 0 : 1);
	}
}
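
/*
 * Illustrative note: the timeout byte written above is in host-coalescing
 * timer units rather than microseconds; the divide-by-12 assumes a 12us
 * HC resolution (an assumption of this note).  A zero tick value instead
 * sets the per-index "disable" flag so completions for that index are not
 * coalesced at all.
 */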
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod, cqe_ring_prod = 0;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
				struct bnx2x_fastpath *fp = &bp->fp[j];

				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp,
					      ETH_MAX_AGGREGATION_QUEUES_E1H);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
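
/*
 * Illustrative sketch of the "next page" chaining set up above: the last
 * usable descriptor(s) of each page hold the bus address of the following
 * page, and the (i % NUM_..._RINGS) term wraps the final page back onto
 * page 0, so the hardware sees one endless ring:
 *
 *	page0 [bd0 .. bdN, next -> page1]
 *	page1 [bd0 .. bdN, next -> page0]
 */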
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						HC_INDEX_C_ETH_TX_CQ_CONS;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
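
/*
 * Worked example for the max_sges_for_packet math above, assuming a 4KB
 * BCM_PAGE_SIZE and PAGES_PER_SGE = 2 (values assumed for illustration):
 * a 9000 byte MTU gives mtu + overhead of roughly 9014 bytes, page-aligned
 * to 12KB = 3 pages, which is then rounded up to a multiple of
 * PAGES_PER_SGE and divided by it, i.e. (3 + 1) / 2 = 2 SGEs per packet.
 */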
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_use_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
			      fp->status_blk_mapping);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, DEF_SB_ID);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;
	else
		return rc;
}
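
/*
 * Illustrative sketch of the gzip framing handled above: zlib's raw
 * inflate mode (-MAX_WBITS) understands only the deflate payload, so the
 * fixed 10-byte member header (magic 0x1f 0x8b, method 0x08, flags,
 * mtime, xfl, os) and, when FNAME (bit 3) is set, the NUL-terminated
 * original file name must be skipped by hand before inflating.
 */
#if 0
	/* header layout assumed by the checks in bnx2x_gunzip() */
	u8 hdr[10] = { 0x1f, 0x8b, Z_DEFLATED, /* flags */ 0x0,
		       /* mtime */ 0, 0, 0, 0, /* xfl */ 0, /* os */ 3 };
#endif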

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0;
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tmp = {0};

		tmp.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tmp)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tmp)[1]);
	}

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			 port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			 port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			 port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			 port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			 port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			 port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
			 MISC_AEU_PORT0_START,
			 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			 port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		int wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1 (valid bit)
   added as the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
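
/*
 * Worked example for the ONCHIP_ADDR macros above: a page at bus address
 * 0x0000001234567000 is first shifted right 12 bits (0x1234567); the low
 * 32 bits of that go into ONCHIP_ADDR1 (0x01234567), while ONCHIP_ADDR2
 * keeps the remaining high bits (here 0) with the valid bit set:
 * (1 << 20) | 0 = 0x00100000.
 */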

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	u32 reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
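
/*
 * Illustrative usage of the mailbox handshake above (a sketch, not new
 * driver logic): the per-function sequence number is the ownership token,
 * and the caller treats the returned FW_MSG_CODE as the grant, the same
 * way bnx2x_nic_load() consumes it:
 */
#if 0
	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
		/* first function up - also init the common blocks */;
#endif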

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
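
/*
 * The alloc/free macro pairs above implement the usual kernel goto-unwind
 * pattern: any failed allocation jumps to alloc_mem_err, and
 * bnx2x_free_mem() is safe to call on a partially built state because its
 * free macros check each pointer for NULL before releasing it.
 */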

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp,
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n",
				  i + offset, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	config->config_table[0].target_table_entry.flags = 0;
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
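
/*
 * Worked example for the CAM packing above, on a little-endian host with
 * the illustrative MAC 00:11:22:33:44:55: *(u16 *)&dev_addr[0] reads
 * 0x1100, and swab16() turns it into the 0x0011 the CAM expects, so the
 * entry becomes msb=0x0011, middle=0x2233, lsb=0x4455.
 */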

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	config->config_table[0].flags = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}
		mb(); /* state is changed by bnx2x_sp_event() */

		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}
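
/*
 * Both setup paths above follow the same slow-path convention: post a
 * ramrod on the SPQ, then spin in bnx2x_wait_ramrod() on a state word
 * that the completion handler (bnx2x_sp_event()) updates when the
 * firmware answers on the status block.
 */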

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);
6204 /* must be called with rtnl_lock */
6205 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6210 #ifdef BNX2X_STOP_ON_ERROR
6211 if (unlikely(bp->panic))
6215 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6217 /* Send LOAD_REQUEST command to MCP
6218 Returns the type of LOAD command:
6219 if it is the first port to be initialized
6220 common blocks should be initialized, otherwise - not
6222 if (!BP_NOMCP(bp)) {
6223 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6225 BNX2X_ERR("MCP response failure, aborting\n");
6228 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6229 return -EBUSY; /* other port in diagnostic mode */
6232 int port = BP_PORT(bp);
6234 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6235 load_count[0], load_count[1], load_count[2]);
6237 load_count[1 + port]++;
6238 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6239 load_count[0], load_count[1], load_count[2]);
6240 if (load_count[0] == 1)
6241 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6242 else if (load_count[1 + port] == 1)
6243 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6245 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6248 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6249 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6253 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6255 /* if we can't use MSI-X we only need one fp,
6256 * so try to enable MSI-X with the requested number of fp's
6257 * and fallback to inta with one fp
6263 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6264 /* user requested number */
6265 bp->num_queues = use_multi;
6268 bp->num_queues = min_t(u32, num_online_cpus(),
6273 if (bnx2x_enable_msix(bp)) {
6274 /* failed to enable MSI-X */
6277 BNX2X_ERR("Multi requested but failed"
6278 " to enable MSI-X\n");
6282 "set number of queues to %d\n", bp->num_queues);
6284 if (bnx2x_alloc_mem(bp))
6287 for_each_queue(bp, i)
6288 bnx2x_fp(bp, i, disable_tpa) =
6289 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6291 if (bp->flags & USING_MSIX_FLAG) {
6292 rc = bnx2x_req_msix_irqs(bp);
6294 pci_disable_msix(bp->pdev);
6299 rc = bnx2x_req_irq(bp);
6301 BNX2X_ERR("IRQ request failed, aborting\n");
6306 for_each_queue(bp, i)
6307 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6311 rc = bnx2x_init_hw(bp, load_code);
6313 BNX2X_ERR("HW init failed, aborting\n");
6317 /* Setup NIC internals and enable interrupts */
6318 bnx2x_nic_init(bp, load_code);
6320 /* Send LOAD_DONE command to MCP */
6321 if (!BP_NOMCP(bp)) {
6322 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6324 BNX2X_ERR("MCP response failure, aborting\n");
6326 goto load_int_disable;
6330 bnx2x_stats_init(bp);
6332 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6334 /* Enable Rx interrupt handling before sending the ramrod
6335 as it's completed on Rx FP queue */
6336 for_each_queue(bp, i)
6337 napi_enable(&bnx2x_fp(bp, i, napi));
6339 /* Enable interrupt handling */
6340 atomic_set(&bp->intr_sem, 0);
6342 rc = bnx2x_setup_leading(bp);
6344 BNX2X_ERR("Setup leading failed!\n");
6345 goto load_stop_netif;
6348 if (CHIP_IS_E1H(bp))
6349 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6350 BNX2X_ERR("!!! mf_cfg function disabled\n");
6351 bp->state = BNX2X_STATE_DISABLED;
6354 if (bp->state == BNX2X_STATE_OPEN)
6355 for_each_nondefault_queue(bp, i) {
6356 rc = bnx2x_setup_multi(bp, i);
6358 goto load_stop_netif;
6362 bnx2x_set_mac_addr_e1(bp);
6364 bnx2x_set_mac_addr_e1h(bp);
6367 bnx2x_initial_phy_init(bp);
6369 /* Start fast path */
6370 switch (load_mode) {
6372 /* Tx queue should only be re-enabled */
6373 netif_wake_queue(bp->dev);
6374 bnx2x_set_rx_mode(bp->dev);
6378 netif_start_queue(bp->dev);
6379 bnx2x_set_rx_mode(bp->dev);
6380 if (bp->flags & USING_MSIX_FLAG)
6381 printk(KERN_INFO PFX "%s: using MSI-X\n",
6386 bnx2x_set_rx_mode(bp->dev);
6387 bp->state = BNX2X_STATE_DIAG;
6395 bnx2x__link_status_update(bp);
6397 /* start the timer */
6398 mod_timer(&bp->timer, jiffies + bp->current_interval);
6404 for_each_queue(bp, i)
6405 napi_disable(&bnx2x_fp(bp, i, napi));
6408 bnx2x_int_disable_sync(bp);
6413 /* Free SKBs, SGEs, TPA pool and driver internals */
6414 bnx2x_free_skbs(bp);
6415 for_each_queue(bp, i)
6416 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6417 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6421 /* TBD we really need to reset the chip
6422 if we want to recover from this */
6426 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6430 /* halt the connection */
6431 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6432 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6434 /* Wait for completion */
6435 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6436 &(bp->fp[index].state), 1);
6437 if (rc) /* timeout */
6440 /* delete cfc entry */
6441 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6443 /* Wait for completion */
6444 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6445 &(bp->fp[index].state), 1);
6449 static int bnx2x_stop_leading(struct bnx2x *bp)
6451 u16 dsb_sp_prod_idx;
6452 /* if the other port is handling traffic,
6453 this can take a lot of time */
6459 /* Send HALT ramrod */
6460 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6461 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6463 /* Wait for completion */
6464 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6465 &(bp->fp[0].state), 1);
6466 if (rc) /* timeout */
6469 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6471 /* Send PORT_DELETE ramrod */
6472 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6474 /* Wait for completion to arrive on the default status block.
6475    We are going to reset the chip anyway,
6476    so there is not much to do if this times out
6478 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6481 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6482 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6483 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6484 #ifdef BNX2X_STOP_ON_ERROR
6494 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6495 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6500 static void bnx2x_reset_func(struct bnx2x *bp)
6502 int port = BP_PORT(bp);
6503 int func = BP_FUNC(bp);
6507 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6508 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6510 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6513 base = FUNC_ILT_BASE(func);
6514 for (i = base; i < base + ILT_PER_FUNC; i++)
6515 bnx2x_ilt_wr(bp, i, 0);
6518 static void bnx2x_reset_port(struct bnx2x *bp)
6520 int port = BP_PORT(bp);
6523 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6525 /* Do not rcv packets to BRB */
6526 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6527 /* Do not direct rcv packets that are not for MCP to the BRB */
6528 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6529 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6532 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6535 /* Check for BRB port occupancy */
6536 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6538 DP(NETIF_MSG_IFDOWN,
6539 "BRB1 is not empty %d blooks are occupied\n", val);
6541 /* TODO: Close Doorbell port? */
6544 static void bnx2x_reset_common(struct bnx2x *bp)
6547 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6549 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6552 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6554 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6555 BP_FUNC(bp), reset_code);
6557 switch (reset_code) {
6558 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6559 bnx2x_reset_port(bp);
6560 bnx2x_reset_func(bp);
6561 bnx2x_reset_common(bp);
6564 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6565 bnx2x_reset_port(bp);
6566 bnx2x_reset_func(bp);
6569 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6570 bnx2x_reset_func(bp);
6574 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6579 /* must be called with rtnl_lock */
6580 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6582 int port = BP_PORT(bp);
6586 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6588 bp->rx_mode = BNX2X_RX_MODE_NONE;
6589 bnx2x_set_storm_rx_mode(bp);
6591 if (netif_running(bp->dev)) {
6592 netif_tx_disable(bp->dev);
6593 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6596 del_timer_sync(&bp->timer);
6597 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6598 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6599 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6601 /* Wait until tx fast path tasks complete */
6602 for_each_queue(bp, i) {
6603 struct bnx2x_fastpath *fp = &bp->fp[i];
6607 while (BNX2X_HAS_TX_WORK(fp)) {
6609 if (!netif_running(bp->dev))
6610 bnx2x_tx_int(fp, 1000);
6613 BNX2X_ERR("timeout waiting for queue[%d]\n",
6615 #ifdef BNX2X_STOP_ON_ERROR
6628 /* Give HW time to discard old tx messages */
6631 for_each_queue(bp, i)
6632 napi_disable(&bnx2x_fp(bp, i, napi));
6633 /* Disable interrupts after Tx and Rx are disabled on stack level */
6634 bnx2x_int_disable_sync(bp);
6639 if (unload_mode == UNLOAD_NORMAL)
6640 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6642 else if (bp->flags & NO_WOL_FLAG) {
6643 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6644 if (CHIP_IS_E1H(bp))
6645 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6647 } else if (bp->wol) {
6648 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6649 u8 *mac_addr = bp->dev->dev_addr;
6651 /* The mac address is written to entries 1-4 to
6652 preserve entry 0 which is used by the PMF */
6653 u8 entry = (BP_E1HVN(bp) + 1)*8;
6655 val = (mac_addr[0] << 8) | mac_addr[1];
6656 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6658 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6659 (mac_addr[4] << 8) | mac_addr[5];
6660 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
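/* Worked example (illustrative MAC, not read from hardware): for
 * 00:10:18:ab:cd:ef the two MAC_MATCH words written above are
 *   (0x00 << 8) | 0x10                               = 0x00000010
 *   (0x18 << 24) | (0xab << 16) | (0xcd << 8) | 0xef = 0x18abcdef
 */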
6662 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6665 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6667 if (CHIP_IS_E1H(bp))
6668 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6670 /* Close multi and leading connections
6671 Completions for ramrods are collected in a synchronous way */
6672 for_each_nondefault_queue(bp, i)
6673 if (bnx2x_stop_multi(bp, i))
6676 rc = bnx2x_stop_leading(bp);
6678 BNX2X_ERR("Stop leading failed!\n");
6679 #ifdef BNX2X_STOP_ON_ERROR
6688 reset_code = bnx2x_fw_command(bp, reset_code);
6690 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6691 load_count[0], load_count[1], load_count[2]);
6693 load_count[1 + port]--;
6694 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6695 load_count[0], load_count[1], load_count[2]);
6696 if (load_count[0] == 0)
6697 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6698 else if (load_count[1 + port] == 0)
6699 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6701 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6704 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6705 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6706 bnx2x__link_reset(bp);
6708 /* Reset the chip */
6709 bnx2x_reset_chip(bp, reset_code);
6711 /* Report UNLOAD_DONE to MCP */
6713 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6715 /* Free SKBs, SGEs, TPA pool and driver internals */
6716 bnx2x_free_skbs(bp);
6717 for_each_queue(bp, i)
6718 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6719 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6722 bp->state = BNX2X_STATE_CLOSED;
6724 netif_carrier_off(bp->dev);
6729 static void bnx2x_reset_task(struct work_struct *work)
6731 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6733 #ifdef BNX2X_STOP_ON_ERROR
6734 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6735 " so reset not done to allow debug dump,\n"
6736 KERN_ERR " you will need to reboot when done\n");
6742 if (!netif_running(bp->dev))
6743 goto reset_task_exit;
6745 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6746 bnx2x_nic_load(bp, LOAD_NORMAL);
6752 /* end of nic load/unload */
6757 * Init service functions
6760 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6764 /* Check if there is any driver already loaded */
6765 val = REG_RD(bp, MISC_REG_UNPREPARED);
6767 /* Check if it is the UNDI driver
6768 * UNDI driver initializes CID offset for normal bell to 0x7
6770 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6771 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6773 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6775 int func = BP_FUNC(bp);
6779 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6781 /* try unload UNDI on port 0 */
6784 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6785 DRV_MSG_SEQ_NUMBER_MASK);
6786 reset_code = bnx2x_fw_command(bp, reset_code);
6788 /* if UNDI is loaded on the other port */
6789 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6791 /* send "DONE" for previous unload */
6792 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6794 /* unload UNDI on port 1 */
6797 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6798 DRV_MSG_SEQ_NUMBER_MASK);
6799 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6801 bnx2x_fw_command(bp, reset_code);
6804 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6805 HC_REG_CONFIG_0), 0x1000);
6807 /* close input traffic and wait for it */
6808 /* Do not rcv packets to BRB */
6810 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6811 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6812 /* Do not direct rcv packets that are not for MCP to
6815 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6816 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6819 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6820 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6823 /* save NIG port swap info */
6824 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6825 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6828 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6831 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6833 /* take the NIG out of reset and restore swap values */
6835 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6836 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6837 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6838 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6840 /* send unload done to the MCP */
6841 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6843 /* restore our func and fw_seq */
6846 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6847 DRV_MSG_SEQ_NUMBER_MASK);
6849 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_UNDI);
6853 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6855 u32 val, val2, val3, val4, id;
6857 /* Get the chip revision id and number. */
6858 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6859 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6860 id = ((val & 0xffff) << 16);
6861 val = REG_RD(bp, MISC_REG_CHIP_REV);
6862 id |= ((val & 0xf) << 12);
6863 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6864 id |= ((val & 0xff) << 4);
6865 val = REG_RD(bp, MISC_REG_BOND_ID);
6866 id |= (val & 0xf);
6867 bp->common.chip_id = id;
6868 bp->link_params.chip_id = bp->common.chip_id;
6869 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6871 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6872 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6873 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6874 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6875 bp->common.flash_size, bp->common.flash_size);
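/* e.g. a MCPR_NVM_CFG4_FLASH_SIZE field of 3 would yield a flash_size
 * of NVRAM_1MB_SIZE << 3, i.e. eight times the base flash unit */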
6877 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6878 bp->link_params.shmem_base = bp->common.shmem_base;
6879 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6881 if (!bp->common.shmem_base ||
6882 (bp->common.shmem_base < 0xA0000) ||
6883 (bp->common.shmem_base >= 0xC0000)) {
6884 BNX2X_DEV_INFO("MCP not active\n");
6885 bp->flags |= NO_MCP_FLAG;
6889 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6890 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6891 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6892 BNX2X_ERR("BAD MCP validity signature\n");
6894 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6895 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6897 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6898 bp->common.hw_config, bp->common.board);
6900 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6901 SHARED_HW_CFG_LED_MODE_MASK) >>
6902 SHARED_HW_CFG_LED_MODE_SHIFT);
6904 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6905 bp->common.bc_ver = val;
6906 BNX2X_DEV_INFO("bc_ver %X\n", val);
6907 if (val < BNX2X_BC_VER) {
6908 /* for now only warn
6909 * later we might need to enforce this */
6910 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6911 " please upgrade BC\n", BNX2X_BC_VER, val);
6913 BNX2X_DEV_INFO("%sWoL Capable\n",
6914 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6916 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6917 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6918 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6919 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6921 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6922 val, val2, val3, val4);
6925 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6928 int port = BP_PORT(bp);
6931 switch (switch_cfg) {
6933 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6936 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6937 switch (ext_phy_type) {
6938 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6939 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6942 bp->port.supported |= (SUPPORTED_10baseT_Half |
6943 SUPPORTED_10baseT_Full |
6944 SUPPORTED_100baseT_Half |
6945 SUPPORTED_100baseT_Full |
6946 SUPPORTED_1000baseT_Full |
6947 SUPPORTED_2500baseX_Full |
6952 SUPPORTED_Asym_Pause);
6955 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6956 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6959 bp->port.supported |= (SUPPORTED_10baseT_Half |
6960 SUPPORTED_10baseT_Full |
6961 SUPPORTED_100baseT_Half |
6962 SUPPORTED_100baseT_Full |
6963 SUPPORTED_1000baseT_Full |
6968 SUPPORTED_Asym_Pause);
6972 BNX2X_ERR("NVRAM config error. "
6973 "BAD SerDes ext_phy_config 0x%x\n",
6974 bp->link_params.ext_phy_config);
6978 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6980 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6983 case SWITCH_CFG_10G:
6984 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6987 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6988 switch (ext_phy_type) {
6989 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6990 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6993 bp->port.supported |= (SUPPORTED_10baseT_Half |
6994 SUPPORTED_10baseT_Full |
6995 SUPPORTED_100baseT_Half |
6996 SUPPORTED_100baseT_Full |
6997 SUPPORTED_1000baseT_Full |
6998 SUPPORTED_2500baseX_Full |
6999 SUPPORTED_10000baseT_Full |
7004 SUPPORTED_Asym_Pause);
7007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7008 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7011 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7014 SUPPORTED_Asym_Pause);
7017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7018 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7021 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7022 SUPPORTED_1000baseT_Full |
7025 SUPPORTED_Asym_Pause);
7028 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7029 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7032 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7033 SUPPORTED_1000baseT_Full |
7037 SUPPORTED_Asym_Pause);
7040 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7041 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7044 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7045 SUPPORTED_2500baseX_Full |
7046 SUPPORTED_1000baseT_Full |
7050 SUPPORTED_Asym_Pause);
7053 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7054 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7057 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7061 SUPPORTED_Asym_Pause);
7064 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7065 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7066 bp->link_params.ext_phy_config);
7070 BNX2X_ERR("NVRAM config error. "
7071 "BAD XGXS ext_phy_config 0x%x\n",
7072 bp->link_params.ext_phy_config);
7076 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7078 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7083 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7084 bp->port.link_config);
7087 bp->link_params.phy_addr = bp->port.phy_addr;
7089 /* mask what we support according to speed_cap_mask */
7090 if (!(bp->link_params.speed_cap_mask &
7091 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7092 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7094 if (!(bp->link_params.speed_cap_mask &
7095 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7096 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7098 if (!(bp->link_params.speed_cap_mask &
7099 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7100 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7102 if (!(bp->link_params.speed_cap_mask &
7103 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7104 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7106 if (!(bp->link_params.speed_cap_mask &
7107 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7108 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7109 SUPPORTED_1000baseT_Full);
7111 if (!(bp->link_params.speed_cap_mask &
7112 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7113 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7115 if (!(bp->link_params.speed_cap_mask &
7116 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7117 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7119 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7122 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7124 bp->link_params.req_duplex = DUPLEX_FULL;
7126 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7127 case PORT_FEATURE_LINK_SPEED_AUTO:
7128 if (bp->port.supported & SUPPORTED_Autoneg) {
7129 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7130 bp->port.advertising = bp->port.supported;
7133 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7135 if ((ext_phy_type ==
7136 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7138 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7139 /* force 10G, no AN */
7140 bp->link_params.req_line_speed = SPEED_10000;
7141 bp->port.advertising =
7142 (ADVERTISED_10000baseT_Full |
7146 BNX2X_ERR("NVRAM config error. "
7147 "Invalid link_config 0x%x"
7148 " Autoneg not supported\n",
7149 bp->port.link_config);
7154 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7155 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7156 bp->link_params.req_line_speed = SPEED_10;
7157 bp->port.advertising = (ADVERTISED_10baseT_Full |
7160 BNX2X_ERR("NVRAM config error. "
7161 "Invalid link_config 0x%x"
7162 " speed_cap_mask 0x%x\n",
7163 bp->port.link_config,
7164 bp->link_params.speed_cap_mask);
7169 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7170 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7171 bp->link_params.req_line_speed = SPEED_10;
7172 bp->link_params.req_duplex = DUPLEX_HALF;
7173 bp->port.advertising = (ADVERTISED_10baseT_Half |
7176 BNX2X_ERR("NVRAM config error. "
7177 "Invalid link_config 0x%x"
7178 " speed_cap_mask 0x%x\n",
7179 bp->port.link_config,
7180 bp->link_params.speed_cap_mask);
7185 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7186 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7187 bp->link_params.req_line_speed = SPEED_100;
7188 bp->port.advertising = (ADVERTISED_100baseT_Full |
7191 BNX2X_ERR("NVRAM config error. "
7192 "Invalid link_config 0x%x"
7193 " speed_cap_mask 0x%x\n",
7194 bp->port.link_config,
7195 bp->link_params.speed_cap_mask);
7200 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7201 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7202 bp->link_params.req_line_speed = SPEED_100;
7203 bp->link_params.req_duplex = DUPLEX_HALF;
7204 bp->port.advertising = (ADVERTISED_100baseT_Half |
7207 BNX2X_ERR("NVRAM config error. "
7208 "Invalid link_config 0x%x"
7209 " speed_cap_mask 0x%x\n",
7210 bp->port.link_config,
7211 bp->link_params.speed_cap_mask);
7216 case PORT_FEATURE_LINK_SPEED_1G:
7217 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7218 bp->link_params.req_line_speed = SPEED_1000;
7219 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7222 BNX2X_ERR("NVRAM config error. "
7223 "Invalid link_config 0x%x"
7224 " speed_cap_mask 0x%x\n",
7225 bp->port.link_config,
7226 bp->link_params.speed_cap_mask);
7231 case PORT_FEATURE_LINK_SPEED_2_5G:
7232 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7233 bp->link_params.req_line_speed = SPEED_2500;
7234 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7237 BNX2X_ERR("NVRAM config error. "
7238 "Invalid link_config 0x%x"
7239 " speed_cap_mask 0x%x\n",
7240 bp->port.link_config,
7241 bp->link_params.speed_cap_mask);
7246 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7247 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7248 case PORT_FEATURE_LINK_SPEED_10G_KR:
7249 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7250 bp->link_params.req_line_speed = SPEED_10000;
7251 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7254 BNX2X_ERR("NVRAM config error. "
7255 "Invalid link_config 0x%x"
7256 " speed_cap_mask 0x%x\n",
7257 bp->port.link_config,
7258 bp->link_params.speed_cap_mask);
7264 BNX2X_ERR("NVRAM config error. "
7265 "BAD link speed link_config 0x%x\n",
7266 bp->port.link_config);
7267 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7268 bp->port.advertising = bp->port.supported;
7272 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7273 PORT_FEATURE_FLOW_CONTROL_MASK);
7274 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7275 !(bp->port.supported & SUPPORTED_Autoneg))
7276 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7278 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7279 " advertising 0x%x\n",
7280 bp->link_params.req_line_speed,
7281 bp->link_params.req_duplex,
7282 bp->link_params.req_flow_ctrl, bp->port.advertising);
7285 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7287 int port = BP_PORT(bp);
7290 bp->link_params.bp = bp;
7291 bp->link_params.port = port;
7293 bp->link_params.serdes_config =
7294 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7295 bp->link_params.lane_config =
7296 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7297 bp->link_params.ext_phy_config =
7299 dev_info.port_hw_config[port].external_phy_config);
7300 bp->link_params.speed_cap_mask =
7302 dev_info.port_hw_config[port].speed_capability_mask);
7304 bp->port.link_config =
7305 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7307 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7308 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7309 " link_config 0x%08x\n",
7310 bp->link_params.serdes_config,
7311 bp->link_params.lane_config,
7312 bp->link_params.ext_phy_config,
7313 bp->link_params.speed_cap_mask, bp->port.link_config);
7315 bp->link_params.switch_cfg = (bp->port.link_config &
7316 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7317 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7319 bnx2x_link_settings_requested(bp);
7321 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7322 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7323 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7324 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7325 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7326 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7327 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7328 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7329 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7330 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
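/* Illustrative decode (hypothetical shmem values): mac_upper 0x0010 and
 * mac_lower 0x18abcdef yield the address 00:10:18:ab:cd:ef; bytes 0-1
 * come from the low 16 bits of mac_upper and bytes 2-5 from mac_lower,
 * most significant byte first.
 */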
7333 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7335 int func = BP_FUNC(bp);
7339 bnx2x_get_common_hwinfo(bp);
7343 if (CHIP_IS_E1H(bp)) {
7345 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7348 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7349 FUNC_MF_CFG_E1HOV_TAG_MASK);
7350 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7354 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7356 func, bp->e1hov, bp->e1hov);
7358 BNX2X_DEV_INFO("Single function mode\n");
7360 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7361 " aborting\n", func);
7367 if (!BP_NOMCP(bp)) {
7368 bnx2x_get_port_hwinfo(bp);
7370 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7371 DRV_MSG_SEQ_NUMBER_MASK);
7372 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7376 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7377 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7378 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7379 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7380 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7381 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7382 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7383 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7384 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7385 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7386 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7388 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7396 /* only supposed to happen on emulation/FPGA */
7397 BNX2X_ERR("warning rendom MAC workaround active\n");
7398 random_ether_addr(bp->dev->dev_addr);
7399 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7405 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7407 int func = BP_FUNC(bp);
7410 /* Disable interrupt handling until HW is initialized */
7411 atomic_set(&bp->intr_sem, 1);
7413 mutex_init(&bp->port.phy_mutex);
7415 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7416 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7418 rc = bnx2x_get_hwinfo(bp);
7420 /* need to reset chip if undi was active */
7422 bnx2x_undi_unload(bp);
7424 if (CHIP_REV_IS_FPGA(bp))
7425 printk(KERN_ERR PFX "FPGA detected\n");
7427 if (BP_NOMCP(bp) && (func == 0))
7429 "MCP disabled, must load devices in order!\n");
7433 bp->flags &= ~TPA_ENABLE_FLAG;
7434 bp->dev->features &= ~NETIF_F_LRO;
7436 bp->flags |= TPA_ENABLE_FLAG;
7437 bp->dev->features |= NETIF_F_LRO;
7441 bp->tx_ring_size = MAX_TX_AVAIL;
7442 bp->rx_ring_size = MAX_RX_AVAIL;
7450 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7451 bp->current_interval = (poll ? poll : bp->timer_interval);
7453 init_timer(&bp->timer);
7454 bp->timer.expires = jiffies + bp->current_interval;
7455 bp->timer.data = (unsigned long) bp;
7456 bp->timer.function = bnx2x_timer;
7462 * ethtool service functions
7465 /* All ethtool functions called with rtnl_lock */
7467 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7469 struct bnx2x *bp = netdev_priv(dev);
7471 cmd->supported = bp->port.supported;
7472 cmd->advertising = bp->port.advertising;
7474 if (netif_carrier_ok(dev)) {
7475 cmd->speed = bp->link_vars.line_speed;
7476 cmd->duplex = bp->link_vars.duplex;
7478 cmd->speed = bp->link_params.req_line_speed;
7479 cmd->duplex = bp->link_params.req_duplex;
7484 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7485 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7486 if (vn_max_rate < cmd->speed)
7487 cmd->speed = vn_max_rate;
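/* the MF max-BW field is in units of 100 Mbps (note the * 100 above),
 * so e.g. a configured value of 25 caps the reported speed at 2500 */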
7490 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7492 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7494 switch (ext_phy_type) {
7495 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7496 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7497 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7498 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7499 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7500 cmd->port = PORT_FIBRE;
7503 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7504 cmd->port = PORT_TP;
7507 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7508 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7509 bp->link_params.ext_phy_config);
7513 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7514 bp->link_params.ext_phy_config);
7518 cmd->port = PORT_TP;
7520 cmd->phy_address = bp->port.phy_addr;
7521 cmd->transceiver = XCVR_INTERNAL;
7523 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7524 cmd->autoneg = AUTONEG_ENABLE;
7526 cmd->autoneg = AUTONEG_DISABLE;
7531 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7532 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7533 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7534 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7535 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7536 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7537 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7542 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7544 struct bnx2x *bp = netdev_priv(dev);
7550 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7551 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7552 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7553 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7554 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7555 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7556 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7558 if (cmd->autoneg == AUTONEG_ENABLE) {
7559 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7560 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7564 /* advertise the requested speed and duplex if supported */
7565 cmd->advertising &= bp->port.supported;
7567 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7568 bp->link_params.req_duplex = DUPLEX_FULL;
7569 bp->port.advertising |= (ADVERTISED_Autoneg |
7572 } else { /* forced speed */
7573 /* advertise the requested speed and duplex if supported */
7574 switch (cmd->speed) {
7576 if (cmd->duplex == DUPLEX_FULL) {
7577 if (!(bp->port.supported &
7578 SUPPORTED_10baseT_Full)) {
7580 "10M full not supported\n");
7584 advertising = (ADVERTISED_10baseT_Full |
7587 if (!(bp->port.supported &
7588 SUPPORTED_10baseT_Half)) {
7590 "10M half not supported\n");
7594 advertising = (ADVERTISED_10baseT_Half |
7600 if (cmd->duplex == DUPLEX_FULL) {
7601 if (!(bp->port.supported &
7602 SUPPORTED_100baseT_Full)) {
7604 "100M full not supported\n");
7608 advertising = (ADVERTISED_100baseT_Full |
7611 if (!(bp->port.supported &
7612 SUPPORTED_100baseT_Half)) {
7614 "100M half not supported\n");
7618 advertising = (ADVERTISED_100baseT_Half |
7624 if (cmd->duplex != DUPLEX_FULL) {
7625 DP(NETIF_MSG_LINK, "1G half not supported\n");
7629 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7630 DP(NETIF_MSG_LINK, "1G full not supported\n");
7634 advertising = (ADVERTISED_1000baseT_Full |
7639 if (cmd->duplex != DUPLEX_FULL) {
7641 "2.5G half not supported\n");
7645 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7647 "2.5G full not supported\n");
7651 advertising = (ADVERTISED_2500baseX_Full |
7656 if (cmd->duplex != DUPLEX_FULL) {
7657 DP(NETIF_MSG_LINK, "10G half not supported\n");
7661 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7662 DP(NETIF_MSG_LINK, "10G full not supported\n");
7666 advertising = (ADVERTISED_10000baseT_Full |
7671 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7675 bp->link_params.req_line_speed = cmd->speed;
7676 bp->link_params.req_duplex = cmd->duplex;
7677 bp->port.advertising = advertising;
7680 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7681 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7682 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7683 bp->port.advertising);
7685 if (netif_running(dev)) {
7686 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7693 #define PHY_FW_VER_LEN 10
7695 static void bnx2x_get_drvinfo(struct net_device *dev,
7696 struct ethtool_drvinfo *info)
7698 struct bnx2x *bp = netdev_priv(dev);
7699 char phy_fw_ver[PHY_FW_VER_LEN];
7701 strcpy(info->driver, DRV_MODULE_NAME);
7702 strcpy(info->version, DRV_MODULE_VERSION);
7704 phy_fw_ver[0] = '\0';
7706 bnx2x_phy_hw_lock(bp);
7707 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7708 (bp->state != BNX2X_STATE_CLOSED),
7709 phy_fw_ver, PHY_FW_VER_LEN);
7710 bnx2x_phy_hw_unlock(bp);
7713 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7714 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7715 BCM_5710_FW_REVISION_VERSION,
7716 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7717 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7718 strcpy(info->bus_info, pci_name(bp->pdev));
7719 info->n_stats = BNX2X_NUM_STATS;
7720 info->testinfo_len = BNX2X_NUM_TESTS;
7721 info->eedump_len = bp->common.flash_size;
7722 info->regdump_len = 0;
7725 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7727 struct bnx2x *bp = netdev_priv(dev);
7729 if (bp->flags & NO_WOL_FLAG) {
7733 wol->supported = WAKE_MAGIC;
7735 wol->wolopts = WAKE_MAGIC;
7739 memset(&wol->sopass, 0, sizeof(wol->sopass));
7742 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7744 struct bnx2x *bp = netdev_priv(dev);
7746 if (wol->wolopts & ~WAKE_MAGIC)
7749 if (wol->wolopts & WAKE_MAGIC) {
7750 if (bp->flags & NO_WOL_FLAG)
7760 static u32 bnx2x_get_msglevel(struct net_device *dev)
7762 struct bnx2x *bp = netdev_priv(dev);
7764 return bp->msglevel;
7767 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7769 struct bnx2x *bp = netdev_priv(dev);
7771 if (capable(CAP_NET_ADMIN))
7772 bp->msglevel = level;
7775 static int bnx2x_nway_reset(struct net_device *dev)
7777 struct bnx2x *bp = netdev_priv(dev);
7782 if (netif_running(dev)) {
7783 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7790 static int bnx2x_get_eeprom_len(struct net_device *dev)
7792 struct bnx2x *bp = netdev_priv(dev);
7794 return bp->common.flash_size;
7797 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7799 int port = BP_PORT(bp);
7803 /* adjust timeout for emulation/FPGA */
7804 count = NVRAM_TIMEOUT_COUNT;
7805 if (CHIP_REV_IS_SLOW(bp))
7808 /* request access to nvram interface */
7809 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7810 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7812 for (i = 0; i < count*10; i++) {
7813 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7814 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7820 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7821 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7828 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7830 int port = BP_PORT(bp);
7834 /* adjust timeout for emulation/FPGA */
7835 count = NVRAM_TIMEOUT_COUNT;
7836 if (CHIP_REV_IS_SLOW(bp))
7839 /* relinquish nvram interface */
7840 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7841 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7843 for (i = 0; i < count*10; i++) {
7844 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7845 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7851 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7852 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7859 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7863 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7865 /* enable both bits, even on read */
7866 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7867 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7868 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7871 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7875 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7877 /* disable both bits, even after read */
7878 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7879 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7880 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7883 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7889 /* build the command word */
7890 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7892 /* need to clear DONE bit separately */
7893 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7895 /* address of the NVRAM to read from */
7896 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7897 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7899 /* issue a read command */
7900 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7902 /* adjust timeout for emulation/FPGA */
7903 count = NVRAM_TIMEOUT_COUNT;
7904 if (CHIP_REV_IS_SLOW(bp))
7907 /* wait for completion */
7910 for (i = 0; i < count; i++) {
7912 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7914 if (val & MCPR_NVM_COMMAND_DONE) {
7915 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7916 /* we read nvram data in cpu order
7917 * but ethtool sees it as an array of bytes
7918 * converting to big-endian will do the work */
7919 val = cpu_to_be32(val);
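/* e.g. the bootstrap magic: the NVRAM byte sequence 66 99 55 aa is
 * presented by the register as host-order 0x669955aa and lands in
 * the caller's buffer as the bytes 66 99 55 aa again */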
7929 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7936 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7938 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7943 if (offset + buf_size > bp->common.flash_size) {
7944 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7945 " buf_size (0x%x) > flash_size (0x%x)\n",
7946 offset, buf_size, bp->common.flash_size);
7950 /* request access to nvram interface */
7951 rc = bnx2x_acquire_nvram_lock(bp);
7955 /* enable access to nvram interface */
7956 bnx2x_enable_nvram_access(bp);
7958 /* read the first word(s) */
7959 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7960 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7961 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7962 memcpy(ret_buf, &val, 4);
7964 /* advance to the next dword */
7965 offset += sizeof(u32);
7966 ret_buf += sizeof(u32);
7967 buf_size -= sizeof(u32);
7972 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7973 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7974 memcpy(ret_buf, &val, 4);
7977 /* disable access to nvram interface */
7978 bnx2x_disable_nvram_access(bp);
7979 bnx2x_release_nvram_lock(bp);
7984 static int bnx2x_get_eeprom(struct net_device *dev,
7985 struct ethtool_eeprom *eeprom, u8 *eebuf)
7987 struct bnx2x *bp = netdev_priv(dev);
7990 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7991 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7992 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7993 eeprom->len, eeprom->len);
7995 /* parameters already validated in ethtool_get_eeprom */
7997 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8002 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8007 /* build the command word */
8008 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8010 /* need to clear DONE bit separately */
8011 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8013 /* write the data */
8014 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8016 /* address of the NVRAM to write to */
8017 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8018 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8020 /* issue the write command */
8021 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8023 /* adjust timeout for emulation/FPGA */
8024 count = NVRAM_TIMEOUT_COUNT;
8025 if (CHIP_REV_IS_SLOW(bp))
8028 /* wait for completion */
8030 for (i = 0; i < count; i++) {
8032 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8033 if (val & MCPR_NVM_COMMAND_DONE) {
8042 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
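/* Worked example for the single-byte write below (illustrative values):
 * writing 0xab at offset 0x102 gives align_offset 0x100 and
 * BYTE_OFFSET(0x102) == 16, so bits 23:16 of the dword read back are
 * cleared and replaced with 0xab before the dword is rewritten in a
 * single FIRST|LAST command.
 */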
8044 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8052 if (offset + buf_size > bp->common.flash_size) {
8053 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8054 " buf_size (0x%x) > flash_size (0x%x)\n",
8055 offset, buf_size, bp->common.flash_size);
8059 /* request access to nvram interface */
8060 rc = bnx2x_acquire_nvram_lock(bp);
8064 /* enable access to nvram interface */
8065 bnx2x_enable_nvram_access(bp);
8067 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8068 align_offset = (offset & ~0x03);
8069 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8072 val &= ~(0xff << BYTE_OFFSET(offset));
8073 val |= (*data_buf << BYTE_OFFSET(offset));
8075 /* nvram data is returned as an array of bytes
8076 * convert it back to cpu order */
8077 val = be32_to_cpu(val);
8079 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8083 /* disable access to nvram interface */
8084 bnx2x_disable_nvram_access(bp);
8085 bnx2x_release_nvram_lock(bp);
8090 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8098 if (buf_size == 1) /* ethtool */
8099 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8101 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8103 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8108 if (offset + buf_size > bp->common.flash_size) {
8109 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8110 " buf_size (0x%x) > flash_size (0x%x)\n",
8111 offset, buf_size, bp->common.flash_size);
8115 /* request access to nvram interface */
8116 rc = bnx2x_acquire_nvram_lock(bp);
8120 /* enable access to nvram interface */
8121 bnx2x_enable_nvram_access(bp);
8124 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8125 while ((written_so_far < buf_size) && (rc == 0)) {
8126 if (written_so_far == (buf_size - sizeof(u32)))
8127 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8128 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8129 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8130 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8131 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
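/* i.e. a buffer crossing an NVRAM page boundary is split into
 * per-page bursts: the dword that ends a page carries LAST and
 * the dword that starts the next page carries FIRST */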
8133 memcpy(&val, data_buf, 4);
8135 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8137 /* advance to the next dword */
8138 offset += sizeof(u32);
8139 data_buf += sizeof(u32);
8140 written_so_far += sizeof(u32);
8144 /* disable access to nvram interface */
8145 bnx2x_disable_nvram_access(bp);
8146 bnx2x_release_nvram_lock(bp);
8151 static int bnx2x_set_eeprom(struct net_device *dev,
8152 struct ethtool_eeprom *eeprom, u8 *eebuf)
8154 struct bnx2x *bp = netdev_priv(dev);
8157 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8158 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8159 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8160 eeprom->len, eeprom->len);
8162 /* parameters already validated in ethtool_set_eeprom */
8164 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8165 if (eeprom->magic == 0x00504859)
8168 bnx2x_phy_hw_lock(bp);
8169 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8170 bp->link_params.ext_phy_config,
8171 (bp->state != BNX2X_STATE_CLOSED),
8172 eebuf, eeprom->len);
8173 if ((bp->state == BNX2X_STATE_OPEN) ||
8174 (bp->state == BNX2X_STATE_DISABLED)) {
8175 rc |= bnx2x_link_reset(&bp->link_params,
8177 rc |= bnx2x_phy_init(&bp->link_params,
8180 bnx2x_phy_hw_unlock(bp);
8182 } else /* Only the PMF can access the PHY */
8185 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8190 static int bnx2x_get_coalesce(struct net_device *dev,
8191 struct ethtool_coalesce *coal)
8193 struct bnx2x *bp = netdev_priv(dev);
8195 memset(coal, 0, sizeof(struct ethtool_coalesce));
8197 coal->rx_coalesce_usecs = bp->rx_ticks;
8198 coal->tx_coalesce_usecs = bp->tx_ticks;
8203 static int bnx2x_set_coalesce(struct net_device *dev,
8204 struct ethtool_coalesce *coal)
8206 struct bnx2x *bp = netdev_priv(dev);
8208 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8209 if (bp->rx_ticks > 3000)
8210 bp->rx_ticks = 3000;
8212 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8213 if (bp->tx_ticks > 0x3000)
8214 bp->tx_ticks = 0x3000;
8216 if (netif_running(dev))
8217 bnx2x_update_coalesce(bp);
8222 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8224 struct bnx2x *bp = netdev_priv(dev);
8228 if (data & ETH_FLAG_LRO) {
8229 if (!(dev->features & NETIF_F_LRO)) {
8230 dev->features |= NETIF_F_LRO;
8231 bp->flags |= TPA_ENABLE_FLAG;
8235 } else if (dev->features & NETIF_F_LRO) {
8236 dev->features &= ~NETIF_F_LRO;
8237 bp->flags &= ~TPA_ENABLE_FLAG;
8241 if (changed && netif_running(dev)) {
8242 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8243 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8249 static void bnx2x_get_ringparam(struct net_device *dev,
8250 struct ethtool_ringparam *ering)
8252 struct bnx2x *bp = netdev_priv(dev);
8254 ering->rx_max_pending = MAX_RX_AVAIL;
8255 ering->rx_mini_max_pending = 0;
8256 ering->rx_jumbo_max_pending = 0;
8258 ering->rx_pending = bp->rx_ring_size;
8259 ering->rx_mini_pending = 0;
8260 ering->rx_jumbo_pending = 0;
8262 ering->tx_max_pending = MAX_TX_AVAIL;
8263 ering->tx_pending = bp->tx_ring_size;
8266 static int bnx2x_set_ringparam(struct net_device *dev,
8267 struct ethtool_ringparam *ering)
8269 struct bnx2x *bp = netdev_priv(dev);
8272 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8273 (ering->tx_pending > MAX_TX_AVAIL) ||
8274 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8277 bp->rx_ring_size = ering->rx_pending;
8278 bp->tx_ring_size = ering->tx_pending;
8280 if (netif_running(dev)) {
8281 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8282 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8288 static void bnx2x_get_pauseparam(struct net_device *dev,
8289 struct ethtool_pauseparam *epause)
8291 struct bnx2x *bp = netdev_priv(dev);
8293 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8294 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8296 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8298 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8301 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8302 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8303 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8306 static int bnx2x_set_pauseparam(struct net_device *dev,
8307 struct ethtool_pauseparam *epause)
8309 struct bnx2x *bp = netdev_priv(dev);
8314 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8315 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8316 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8318 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8320 if (epause->rx_pause)
8321 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8323 if (epause->tx_pause)
8324 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8326 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8327 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
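/* i.e. if neither rx_pause nor tx_pause was requested, the initial
 * AUTO value survives the ORs above and is degraded to NONE here;
 * autoneg may restore AUTO just below */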
8329 if (epause->autoneg) {
8330 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8331 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8335 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8336 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8340 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8342 if (netif_running(dev)) {
8343 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8350 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8352 struct bnx2x *bp = netdev_priv(dev);
8357 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8359 struct bnx2x *bp = netdev_priv(dev);
8365 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8368 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8369 dev->features |= NETIF_F_TSO6;
8371 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8372 dev->features &= ~NETIF_F_TSO6;
8378 static const struct {
8379 char string[ETH_GSTRING_LEN];
8380 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8381 { "register_test (offline)" },
8382 { "memory_test (offline)" },
8383 { "loopback_test (offline)" },
8384 { "nvram_test (online)" },
8385 { "interrupt_test (online)" },
8386 { "link_test (online)" },
8387 { "idle check (online)" },
8388 { "MC errors (online)" }
8391 static int bnx2x_self_test_count(struct net_device *dev)
8393 return BNX2X_NUM_TESTS;
8396 static int bnx2x_test_registers(struct bnx2x *bp)
8398 int idx, i, rc = -ENODEV;
8400 static const struct {
8405 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8406 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8407 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8408 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8409 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8410 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8411 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8412 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8413 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8414 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8415 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8416 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8417 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8418 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8419 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8420 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8421 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8422 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8423 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8424 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8425 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8426 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8427 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8428 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8429 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8430 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8431 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8432 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8433 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8434 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8435 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8436 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8437 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8438 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8439 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8440 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8441 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8442 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8444 { 0xffffffff, 0, 0x00000000 }
8447 if (!netif_running(bp->dev))
8450 /* Repeat the test twice:
8451 First by writing 0x00000000, second by writing 0xffffffff */
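/* Each reg_tbl row is { offset0, offset1, mask }: the register at
 * offset0 + port*offset1 is written and read back, and only the bits
 * set in mask are compared.  E.g. the PBF_REG_MAC_IF0_ENABLE row
 * { ..., 4, 0x00000001 } exercises just bit 0 of this port's copy.
 */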
8452 for (idx = 0; idx < 2; idx++) {
8459 wr_val = 0xffffffff;
8463 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8464 u32 offset, mask, save_val, val;
8465 int port = BP_PORT(bp);
8467 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8468 mask = reg_tbl[i].mask;
8470 save_val = REG_RD(bp, offset);
8472 REG_WR(bp, offset, wr_val);
8473 val = REG_RD(bp, offset);
8475 /* Restore the original register's value */
8476 REG_WR(bp, offset, save_val);
8478 /* verify the value is as expected */
8479 if ((val & mask) != (wr_val & mask))
8490 static int bnx2x_test_memory(struct bnx2x *bp)
8492 int i, j, rc = -ENODEV;
8494 static const struct {
8498 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8499 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8500 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8501 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8502 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8503 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8504 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8508 static const struct {
8513 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8514 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8515 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8516 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8517 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8518 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8520 { NULL, 0xffffffff, 0 }
8523 if (!netif_running(bp->dev))
8526 /* Go through all the memories */
8527 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8528 for (j = 0; j < mem_tbl[i].size; j++)
8529 REG_RD(bp, mem_tbl[i].offset + j*4);
8531 /* Check the parity status */
8532 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8533 val = REG_RD(bp, prty_tbl[i].offset);
8534 if (val & ~(prty_tbl[i].mask)) {
8536 "%s is 0x%x\n", prty_tbl[i].name, val);
8547 static void bnx2x_netif_start(struct bnx2x *bp)
8551 if (atomic_dec_and_test(&bp->intr_sem)) {
8552 if (netif_running(bp->dev)) {
8553 bnx2x_int_enable(bp);
8554 for_each_queue(bp, i)
8555 napi_enable(&bnx2x_fp(bp, i, napi));
8556 if (bp->state == BNX2X_STATE_OPEN)
8557 netif_wake_queue(bp->dev);
8562 static void bnx2x_netif_stop(struct bnx2x *bp)
8566 if (netif_running(bp->dev)) {
8567 netif_tx_disable(bp->dev);
8568 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8569 for_each_queue(bp, i)
8570 napi_disable(&bnx2x_fp(bp, i, napi));
8572 bnx2x_int_disable_sync(bp);
8575 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8580 while (bnx2x_link_test(bp) && cnt--)
8584 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8586 unsigned int pkt_size, num_pkts, i;
8587 struct sk_buff *skb;
8588 unsigned char *packet;
8589 struct bnx2x_fastpath *fp = &bp->fp[0];
8590 u16 tx_start_idx, tx_idx;
8591 u16 rx_start_idx, rx_idx;
8593 struct sw_tx_bd *tx_buf;
8594 struct eth_tx_bd *tx_bd;
8596 union eth_rx_cqe *cqe;
8598 struct sw_rx_bd *rx_buf;
8602 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8603 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8604 bnx2x_phy_hw_lock(bp);
8605 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8606 bnx2x_phy_hw_unlock(bp);
8608 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8609 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8610 bnx2x_phy_hw_lock(bp);
8611 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8612 bnx2x_phy_hw_unlock(bp);
8613 /* wait until link state is restored */
8614 bnx2x_wait_for_link(bp, link_up);
8620 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8623 goto test_loopback_exit;
8625 packet = skb_put(skb, pkt_size);
8626 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8627 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8628 for (i = ETH_HLEN; i < pkt_size; i++)
8629 packet[i] = (unsigned char) (i & 0xff);
8632 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8633 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8635 pkt_prod = fp->tx_pkt_prod++;
8636 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8637 tx_buf->first_bd = fp->tx_bd_prod;
8640 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8641 mapping = pci_map_single(bp->pdev, skb->data,
8642 skb_headlen(skb), PCI_DMA_TODEVICE);
8643 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8644 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8645 tx_bd->nbd = cpu_to_le16(1);
8646 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8647 tx_bd->vlan = cpu_to_le16(pkt_prod);
8648 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8649 ETH_TX_BD_FLAGS_END_BD);
8650 tx_bd->general_data = ((UNICAST_ADDRESS <<
8651 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8653 fp->hw_tx_prods->bds_prod =
8654 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8655 mb(); /* FW restriction: must not reorder writing nbd and packets */
8656 fp->hw_tx_prods->packets_prod =
8657 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8658 DOORBELL(bp, FP_IDX(fp), 0);
8664 bp->dev->trans_start = jiffies;
8668 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8669 if (tx_idx != tx_start_idx + num_pkts)
8670 goto test_loopback_exit;
8672 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8673 if (rx_idx != rx_start_idx + num_pkts)
8674 goto test_loopback_exit;
8676 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8677 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8678 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8679 goto test_loopback_rx_exit;
8681 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8682 if (len != pkt_size)
8683 goto test_loopback_rx_exit;
8685 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8687 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8688 for (i = ETH_HLEN; i < pkt_size; i++)
8689 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8690 goto test_loopback_rx_exit;
8694 test_loopback_rx_exit:
8695 bp->dev->last_rx = jiffies;
8697 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8698 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8699 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8700 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8702 /* Update producers */
8703 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8705 mmiowb(); /* keep prod updates ordered */
8708 bp->link_params.loopback_mode = LOOPBACK_NONE;
8713 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8717 if (!netif_running(bp->dev))
8718 return BNX2X_LOOPBACK_FAILED;
8720 bnx2x_netif_stop(bp);
8722 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8723 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8724 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8727 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8728 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8729 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8732 bnx2x_netif_start(bp);
8737 #define CRC32_RESIDUAL 0xdebb20e3
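/* Residue check: each nvram_tbl region below carries its own CRC32 in
 * its trailing bytes, so running ether_crc_le() over the whole region
 * (payload plus stored checksum) yields this fixed residual whenever
 * the region is intact, the same trick used to validate an Ethernet
 * FCS.  A sketch of the property, assuming the stored checksum is the
 * complemented CRC in little-endian byte order (put_le32() below is a
 * hypothetical helper, illustration only):
 *
 *	u32 crc = ether_crc_le(size - 4, data);
 *	put_le32(~crc, data + size - 4);
 *	BUG_ON(ether_crc_le(size, data) != CRC32_RESIDUAL);
 */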
8739 static int bnx2x_test_nvram(struct bnx2x *bp)
8741 static const struct {
8745 { 0, 0x14 }, /* bootstrap */
8746 { 0x14, 0xec }, /* dir */
8747 { 0x100, 0x350 }, /* manuf_info */
8748 { 0x450, 0xf0 }, /* feature_info */
8749 { 0x640, 0x64 }, /* upgrade_key_info */
8751 { 0x708, 0x70 }, /* manuf_key_info */
8756 u8 *data = (u8 *)buf;
8760 rc = bnx2x_nvram_read(bp, 0, data, 4);
8762 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8763 goto test_nvram_exit;
8766 magic = be32_to_cpu(buf[0]);
8767 if (magic != 0x669955aa) {
8768 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8770 goto test_nvram_exit;
8773 for (i = 0; nvram_tbl[i].size; i++) {
8775 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8779 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8780 goto test_nvram_exit;
8783 csum = ether_crc_le(nvram_tbl[i].size, data);
8784 if (csum != CRC32_RESIDUAL) {
8786 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8788 goto test_nvram_exit;
static int bnx2x_test_intr(struct bnx2x *bp)
struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
if (!netif_running(bp->dev))
config->hdr.length_6b = 0;
config->hdr.offset = 0;
config->hdr.client_id = BP_CL_ID(bp);
config->hdr.reserved1 = 0;
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
bp->set_mac_pending++;
for (i = 0; i < 10; i++) {
if (!bp->set_mac_pending)
msleep_interruptible(10);
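/* Note: the SET_MAC ramrod completion is polled rather than waited on;
 * set_mac_pending is presumably cleared from the slowpath completion
 * handler, and the loop above allows it roughly 100ms (10 x 10ms)
 * before the interrupt test is considered failed.
 */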
static void bnx2x_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
struct bnx2x *bp = netdev_priv(dev);
memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
if (!netif_running(dev))
/* offline tests are not supported in MF mode */
etest->flags &= ~ETH_TEST_FL_OFFLINE;
if (etest->flags & ETH_TEST_FL_OFFLINE) {
link_up = bp->link_vars.link_up;
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_DIAG);
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up);
if (bnx2x_test_registers(bp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
if (bnx2x_test_memory(bp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
buf[2] = bnx2x_test_loopback(bp, link_up);
etest->flags |= ETH_TEST_FL_FAILED;
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_NORMAL);
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up);
if (bnx2x_test_nvram(bp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
if (bnx2x_test_intr(bp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
if (bnx2x_link_test(bp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
buf[7] = bnx2x_mc_assert(bp);
etest->flags |= ETH_TEST_FL_FAILED;
#ifdef BNX2X_EXTRA_DEBUG
bnx2x_panic_dump(bp);
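/* Usage note (illustration): the battery above is what
 * "ethtool -t ethX offline" drives from user space; without the
 * offline keyword ETH_TEST_FL_OFFLINE stays clear and only the
 * online tests (nvram, interrupt attention, link) are executed.
 */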
static const struct {
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2
u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8, STATS_FLAGS_FUNC, "rx_bytes" },
{ STATS_OFFSET32(error_bytes_received_hi),
8, STATS_FLAGS_FUNC, "rx_error_bytes" },
{ STATS_OFFSET32(total_bytes_transmitted_hi),
8, STATS_FLAGS_FUNC, "tx_bytes" },
{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_received_hi),
8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
{ STATS_OFFSET32(total_multicast_packets_received_hi),
8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
{ STATS_OFFSET32(total_broadcast_packets_received_hi),
8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, STATS_FLAGS_FUNC, "tx_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8, STATS_FLAGS_PORT, "tx_carrier_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8, STATS_FLAGS_PORT, "rx_crc_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8, STATS_FLAGS_PORT, "rx_align_errors" },
{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_multi_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8, STATS_FLAGS_PORT, "tx_excess_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8, STATS_FLAGS_PORT, "tx_late_collisions" },
{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8, STATS_FLAGS_PORT, "tx_total_collisions" },
{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8, STATS_FLAGS_PORT, "rx_jabbers" },
{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8, STATS_FLAGS_PORT, "rx_undersize_packets" },
{ STATS_OFFSET32(jabber_packets_received),
4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8, STATS_FLAGS_PORT, "rx_xon_frames" },
{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8, STATS_FLAGS_PORT, "rx_xoff_frames" },
{ STATS_OFFSET32(tx_stat_outxonsent_hi),
8, STATS_FLAGS_PORT, "tx_xon_frames" },
{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
8, STATS_FLAGS_PORT, "tx_xoff_frames" },
{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
{ STATS_OFFSET32(mac_filter_discard),
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(no_buff_discard),
4, STATS_FLAGS_FUNC, "rx_discards" },
{ STATS_OFFSET32(xxoverflow_discard),
4, STATS_FLAGS_PORT, "rx_fw_discards" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
{ STATS_OFFSET32(rx_skb_alloc_failed),
4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }

#define IS_NOT_E1HMF_STAT(bp, i) \
(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
struct bnx2x *bp = netdev_priv(dev);
switch (stringset) {
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_NOT_E1HMF_STAT(bp, i))
strcpy(buf + j*ETH_GSTRING_LEN,
bnx2x_stats_arr[i].string);
memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));

static int bnx2x_get_stats_count(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
int i, num_stats = 0;
for (i = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_NOT_E1HMF_STAT(bp, i))

static void bnx2x_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *buf)
struct bnx2x *bp = netdev_priv(dev);
u32 *hw_stats = (u32 *)&bp->eth_stats;
for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
if (IS_NOT_E1HMF_STAT(bp, i))
if (bnx2x_stats_arr[i].size == 0) {
/* skip this counter */
if (bnx2x_stats_arr[i].size == 4) {
/* 4-byte counter */
buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
/* 8-byte counter */
buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
*(hw_stats + bnx2x_stats_arr[i].offset + 1));
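/* How the 8-byte counters above are assembled (illustration only):
 * the statistics block keeps them as two 32-bit halves, _hi first,
 * and HILO_U64() is understood to just glue them back together.
 * Hypothetical stand-alone equivalent:
 */
#if 0
static u64 hilo_u64_sketch(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;	/* e.g. 0x1, 0x2 -> 0x100000002 */
}
#endif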
static int bnx2x_phys_id(struct net_device *dev, u32 data)
struct bnx2x *bp = netdev_priv(dev);
int port = BP_PORT(bp);
if (!netif_running(dev))
for (i = 0; i < (data * 2); i++) {
bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
bp->link_params.hw_led_mode,
bp->link_params.chip_id);
bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
bp->link_params.hw_led_mode,
bp->link_params.chip_id);
msleep_interruptible(500);
if (signal_pending(current))
if (bp->link_vars.link_up)
bnx2x_set_led(bp, port, LED_MODE_OPER,
bp->link_vars.line_speed,
bp->link_params.hw_led_mode,
bp->link_params.chip_id);

static struct ethtool_ops bnx2x_ethtool_ops = {
.get_settings = bnx2x_get_settings,
.set_settings = bnx2x_set_settings,
.get_drvinfo = bnx2x_get_drvinfo,
.get_wol = bnx2x_get_wol,
.set_wol = bnx2x_set_wol,
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
.nway_reset = bnx2x_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = bnx2x_get_eeprom_len,
.get_eeprom = bnx2x_get_eeprom,
.set_eeprom = bnx2x_set_eeprom,
.get_coalesce = bnx2x_get_coalesce,
.set_coalesce = bnx2x_set_coalesce,
.get_ringparam = bnx2x_get_ringparam,
.set_ringparam = bnx2x_set_ringparam,
.get_pauseparam = bnx2x_get_pauseparam,
.set_pauseparam = bnx2x_set_pauseparam,
.get_rx_csum = bnx2x_get_rx_csum,
.set_rx_csum = bnx2x_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_hw_csum,
.set_flags = bnx2x_set_flags,
.get_flags = ethtool_op_get_flags,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = bnx2x_set_tso,
.self_test_count = bnx2x_self_test_count,
.self_test = bnx2x_self_test,
.get_strings = bnx2x_get_strings,
.phys_id = bnx2x_phys_id,
.get_stats_count = bnx2x_get_stats_count,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
/* end of ethtool_ops */
/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
PCI_PM_CTRL_PME_STATUS));
if (pmcsr & PCI_PM_CTRL_STATE_MASK)
/* delay required during transition out of D3hot */
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
/* No more memory access after this point until
* device is brought back to D0.

* net_device service functions

static int bnx2x_poll(struct napi_struct *napi, int budget)
struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
struct bnx2x *bp = fp->bp;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
bnx2x_update_fpsb_idx(fp);
if (BNX2X_HAS_TX_WORK(fp))
bnx2x_tx_int(fp, budget);
if (BNX2X_HAS_RX_WORK(fp))
work_done = bnx2x_rx_int(fp, budget);
rmb(); /* BNX2X_HAS_WORK() reads the status block */
/* must not complete if we consumed full budget */
if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
netif_rx_complete(bp->dev, napi);
bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
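/* The NAPI contract in short: a poller may consume at most 'budget'
 * packets per call; only when the budget was not exhausted AND no
 * further work is pending may it call netif_rx_complete() and
 * re-enable the IGU interrupt (as done above), otherwise it must
 * return and expect to be polled again.
 */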
/* we split the first BD into headers and data BDs
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
* So far this has only been observed to happen
* in Other Operating Systems(TM)
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
struct eth_tx_bd **tx_bd, u16 hlen,
u16 bd_prod, int nbd)
struct eth_tx_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd;
int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */
h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
"(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
h_tx_bd->addr_lo, h_tx_bd->nbd);
/* now get a new data BD
* (after the pbd) and fill it */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
d_tx_bd = &fp->tx_desc_ring[bd_prod];
mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
/* this marks the BD as one that has no individual mapping
* the FW ignores this flag in a BD not marked start
d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
DP(NETIF_MSG_TX_QUEUED,
"TSO split data size is %d (%x:%x)\n",
d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
/* update tx_bd for marking the last BD flag */

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
csum = (u16) ~csum_fold(csum_sub(csum,
csum_partial(t_header - fix, fix, 0)));
csum = (u16) ~csum_fold(csum_add(csum,
csum_partial(t_header, -fix, 0)));
return swab16(csum);
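/* The arithmetic bnx2x_csum_fix() relies on (sketch only): the
 * Internet checksum is a one's-complement sum, so a run of bytes can
 * be excluded from - or included into - an existing sum by
 * subtracting (adding) its partial checksum; csum_sub()/csum_add()
 * do this on 32-bit accumulators. A minimal 16-bit illustration with
 * hypothetical helper names:
 */
#if 0
static u16 csum16_add(u16 a, u16 b)
{
	u32 s = (u32)a + b;

	return (u16)((s & 0xffff) + (s >> 16));	/* fold the carry back in */
}

static u16 csum16_sub(u16 a, u16 b)
{
	return csum16_add(a, ~b);	/* subtracting = adding the complement */
}
#endif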
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
if (skb->ip_summed != CHECKSUM_PARTIAL)
if (skb->protocol == ntohs(ETH_P_IPV6)) {
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
int first_bd_sz = 0;
/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
if (xmit_type & XMIT_GSO) {
unsigned short lso_mss = skb_shinfo(skb)->gso_size;
/* Check if LSO packet needs to be copied:
3 = 1 (for headers BD) + 2 (for PBD and last BD) */
int wnd_size = MAX_FETCH_BD - 3;
/* Number of windows to check */
int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
/* Headers length */
hlen = (int)(skb_transport_header(skb) - skb->data) +
/* Amount of data (w/o headers) on linear part of SKB */
first_bd_sz = skb_headlen(skb) - hlen;
wnd_sum = first_bd_sz;
/* Calculate the first sum - it's special */
for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
skb_shinfo(skb)->frags[frag_idx].size;
/* If there was data on linear skb data - check it */
if (first_bd_sz > 0) {
if (unlikely(wnd_sum < lso_mss)) {
wnd_sum -= first_bd_sz;
/* Others are easier: run through the frag list and
check all windows */
for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
if (unlikely(wnd_sum < lso_mss)) {
skb_shinfo(skb)->frags[wnd_idx].size;
/* in non-LSO too fragmented packet should always
be linearized */
if (unlikely(to_copy))
DP(NETIF_MSG_TX_QUEUED,
"Linearization IS REQUIRED for %s packet. "
"num_frags %d hlen %d first_bd_sz %d\n",
(xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
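/* The loop above is a classic sliding-window sum: once the first
 * window of wnd_size fragment lengths is summed, every following
 * window is derived in O(1) by adding the entering fragment and
 * dropping the leaving one. Stand-alone sketch (hypothetical helper,
 * illustration only) of the condition that forces linearization:
 */
#if 0
static int any_window_below(const unsigned int *len, int n, int wnd,
			    unsigned int min_sum)
{
	unsigned int sum = 0;
	int i;

	if (n < wnd)
		return 0;
	for (i = 0; i < wnd; i++)
		sum += len[i];
	for (i = 0; ; i++) {
		if (sum < min_sum)
			return 1;	/* FW could not build a full MSS */
		if (i + wnd >= n)
			return 0;
		sum += len[i + wnd] - len[i];	/* slide the window */
	}
}
#endif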
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_fastpath *fp;
struct sw_tx_bd *tx_buf;
struct eth_tx_bd *tx_bd;
struct eth_tx_parse_bd *pbd = NULL;
u16 pkt_prod, bd_prod;
u32 xmit_type = bnx2x_xmit_type(bp, skb);
int vlan_off = (bp->e1hov ? 4 : 0);
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return NETDEV_TX_BUSY;
fp_index = (smp_processor_id() % bp->num_queues);
fp = &bp->fp[fp_index];
if (unlikely(bnx2x_tx_avail(fp) <
(skb_shinfo(skb)->nr_frags + 3))) {
bp->eth_stats.driver_xoff++;
netif_stop_queue(dev);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
" gso type %x xmit_type %x\n",
skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
/* First, check if we need to linearize the skb
(due to FW restrictions) */
if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
/* Statistics of linearization */
if (skb_linearize(skb) != 0) {
DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
"silently dropping this SKB\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
Please read carefully. First we use one BD which we mark as start,
then for TSO or xsum we have a parsing info BD,
and only then we have the rest of the TSO BDs.
(don't forget to mark the last one as last,
and to unmap only AFTER you write to the BD ...)
And above all, all pbd sizes are in words - NOT DWORDS!
pkt_prod = fp->tx_pkt_prod++;
bd_prod = TX_BD(fp->tx_bd_prod);
/* get a tx_buf and first BD */
tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
tx_bd = &fp->tx_desc_ring[bd_prod];
tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_bd->general_data = (UNICAST_ADDRESS <<
ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
tx_bd->general_data |= 1; /* header nbd */
/* remember the first BD of the packet */
tx_buf->first_bd = fp->tx_bd_prod;
DP(NETIF_MSG_TX_QUEUED,
"sending pkt %u @%p next_idx %u bd %u @%p\n",
pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
tx_bd->vlan = cpu_to_le16(pkt_prod);
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
pbd = (void *)&fp->tx_desc_ring[bd_prod];
memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
if (xmit_type & XMIT_CSUM) {
hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
/* for now NS flag is not used in Linux */
pbd->global_data = (hlen |
((skb->protocol == ntohs(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen = (skb_transport_header(skb) -
skb_network_header(skb)) / 2;
hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
pbd->total_hlen = cpu_to_le16(hlen);
hlen = hlen*2 - vlan_off;
tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
if (xmit_type & XMIT_CSUM_V4)
tx_bd->bd_flags.as_bitfield |=
ETH_TX_BD_FLAGS_IP_CSUM;
tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (xmit_type & XMIT_CSUM_TCP) {
pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
s8 fix = SKB_CS_OFF(skb); /* signed! */
pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
pbd->cs_offset = fix / 2;
DP(NETIF_MSG_TX_QUEUED,
"hlen %d offset %d fix %d csum before fix %x\n",
le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
/* HW bug: fixup the CSUM */
pbd->tcp_pseudo_csum =
bnx2x_csum_fix(skb_transport_header(skb),
DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
pbd->tcp_pseudo_csum);
mapping = pci_map_single(bp->pdev, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
tx_bd->nbd = cpu_to_le16(nbd);
tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
" nbytes %d flags %x vlan %x\n",
tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_bd->vlan));
if (xmit_type & XMIT_GSO) {
DP(NETIF_MSG_TX_QUEUED,
"TSO packet len %d hlen %d total len %d tso size %d\n",
skb->len, hlen, skb_headlen(skb),
skb_shinfo(skb)->gso_size);
tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
if (unlikely(skb_headlen(skb) > hlen))
bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
pbd->tcp_flags = pbd_tcp_flags(skb);
if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = swab16(ip_hdr(skb)->id);
pbd->tcp_pseudo_csum =
swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
0, IPPROTO_TCP, 0));
pbd->tcp_pseudo_csum =
swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0));
pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
tx_bd = &fp->tx_desc_ring[bd_prod];
mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_bd->nbytes = cpu_to_le16(frag->size);
tx_bd->vlan = cpu_to_le16(pkt_prod);
tx_bd->bd_flags.as_bitfield = 0;
DP(NETIF_MSG_TX_QUEUED,
"frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
/* now at last mark the BD as the last BD */
tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
tx_bd, tx_bd->bd_flags.as_bitfield);
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
/* now send a tx doorbell, counting the next BD
* if the packet contains or ends with it
if (TX_BD_POFF(bd_prod) < nbd)
DP(NETIF_MSG_TX_QUEUED,
"PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
" tcp_flags %x xsum %x seq %u hlen %u\n",
pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
fp->hw_tx_prods->bds_prod =
cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
mb(); /* FW restriction: must not reorder writing nbd and packets */
fp->hw_tx_prods->packets_prod =
cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
DOORBELL(bp, FP_IDX(fp), 0);
fp->tx_bd_prod += nbd;
dev->trans_start = jiffies;
if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
netif_stop_queue(dev);
bp->eth_stats.driver_xoff++;
if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
netif_wake_queue(dev);
return NETDEV_TX_OK;
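/* Note on the stop-then-recheck sequence above (the standard idiom,
 * not driver-specific): between the availability test and
 * netif_stop_queue() a TX completion may free descriptors; without
 * the second bnx2x_tx_avail() check and conditional wake, the queue
 * could be stopped with room available and never be woken again.
 */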
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
bnx2x_set_power_state(bp, PCI_D0);
return bnx2x_nic_load(bp, LOAD_OPEN);

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
/* Unload the driver, release IRQs */
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
if (atomic_read(&bp->pdev->enable_cnt) == 1)
if (!CHIP_REV_IS_SLOW(bp))
bnx2x_set_power_state(bp, PCI_D3hot);
/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
u32 rx_mode = BNX2X_RX_MODE_NORMAL;
int port = BP_PORT(bp);
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
if (dev->flags & IFF_PROMISC)
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
if (CHIP_IS_E1(bp)) {
struct dev_mc_list *mclist;
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);
for (i = 0, mclist = dev->mc_list;
mclist && (i < dev->mc_count);
i++, mclist = mclist->next) {
config->config_table[i].
cam_entry.msb_mac_addr =
swab16(*(u16 *)&mclist->dmi_addr[0]);
config->config_table[i].
cam_entry.middle_mac_addr =
swab16(*(u16 *)&mclist->dmi_addr[2]);
config->config_table[i].
cam_entry.lsb_mac_addr =
swab16(*(u16 *)&mclist->dmi_addr[4]);
config->config_table[i].cam_entry.flags =
config->config_table[i].
target_table_entry.flags = 0;
config->config_table[i].
target_table_entry.client_id = 0;
config->config_table[i].
target_table_entry.vlan_id = 0;
"setting MCAST[%d] (%04x:%04x:%04x)\n", i,
config->config_table[i].
cam_entry.msb_mac_addr,
config->config_table[i].
cam_entry.middle_mac_addr,
config->config_table[i].
cam_entry.lsb_mac_addr);
old = config->hdr.length_6b;
for (; i < old; i++) {
if (CAM_IS_INVALID(config->
i--; /* already invalidated */
CAM_INVALIDATE(config->
if (CHIP_REV_IS_SLOW(bp))
offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
offset = BNX2X_MAX_MULTICAST*(1 + port);
config->hdr.length_6b = i;
config->hdr.offset = offset;
config->hdr.client_id = BP_CL_ID(bp);
config->hdr.reserved1 = 0;
bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
/* Accept one or more multicasts */
struct dev_mc_list *mclist;
u32 mc_filter[MC_HASH_SIZE];
u32 crc, bit, regidx;
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
for (i = 0, mclist = dev->mc_list;
mclist && (i < dev->mc_count);
i++, mclist = mclist->next) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
"%02x:%02x:%02x:%02x:%02x:%02x\n",
mclist->dmi_addr[0], mclist->dmi_addr[1],
mclist->dmi_addr[2], mclist->dmi_addr[3],
mclist->dmi_addr[4], mclist->dmi_addr[5]);
crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
bit = (crc >> 24) & 0xff;
mc_filter[regidx] |= (1 << bit);
for (i = 0; i < MC_HASH_SIZE; i++)
REG_WR(bp, MC_HASH_OFFSET(bp, i),
bp->rx_mode = rx_mode;
bnx2x_set_storm_rx_mode(bp);
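/* How a multicast address picks its filter bit in the hash branch
 * above (sketch only; the register-index computation itself is not
 * shown above, so the split below is an assumption based on the
 * eight 32-bit MC_HASH registers): the top byte of the CRC32c of the
 * 6-byte MAC selects one of 256 bits.
 */
#if 0
static void mc_hash_pos_sketch(u32 crc, u32 *regidx, u32 *bit)
{
	u32 idx = (crc >> 24) & 0xff;	/* 0..255 */

	*regidx = idx >> 5;	/* which of the 8 filter registers */
	*bit = idx & 0x1f;	/* which bit within that register */
}
#endif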
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct sockaddr *addr = p;
struct bnx2x *bp = netdev_priv(dev);
if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev)) {
bnx2x_set_mac_addr_e1(bp);
bnx2x_set_mac_addr_e1h(bp);

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *data = if_mii(ifr);
struct bnx2x *bp = netdev_priv(dev);
data->phy_id = bp->port.phy_addr;
if (!netif_running(dev))
mutex_lock(&bp->port.phy_mutex);
err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
DEFAULT_PHY_DEV_ADDR,
(data->reg_num & 0x1f), &mii_regval);
data->val_out = mii_regval;
mutex_unlock(&bp->port.phy_mutex);
if (!capable(CAP_NET_ADMIN))
if (!netif_running(dev))
mutex_lock(&bp->port.phy_mutex);
err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
DEFAULT_PHY_DEV_ADDR,
(data->reg_num & 0x1f), data->val_in);
mutex_unlock(&bp->port.phy_mutex);

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
/* This does not race with packet allocation
* because the actual alloc size is
* only updated as part of load
if (netif_running(dev)) {
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
rc = bnx2x_nic_load(bp, LOAD_NORMAL);

static void bnx2x_tx_timeout(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
#ifdef BNX2X_STOP_ON_ERROR
/* This allows the netif to be shutdown gracefully before resetting */
schedule_work(&bp->reset_task);

/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
struct vlan_group *vlgrp)
struct bnx2x *bp = netdev_priv(dev);
if (netif_running(dev))
bnx2x_set_client_config(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
disable_irq(bp->pdev->irq);
bnx2x_interrupt(bp->pdev->irq, dev);
enable_irq(bp->pdev->irq);
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
struct net_device *dev)
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
bp->func = PCI_FUNC(pdev->devfn);
rc = pci_enable_device(pdev);
printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX "Cannot find PCI device base address,"
goto err_out_disable;
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX "Cannot find second PCI device"
" base address, aborting\n");
goto err_out_disable;
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
printk(KERN_ERR PFX "Cannot obtain PCI resources,"
goto err_out_disable;
pci_set_master(pdev);
pci_save_state(pdev);
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
printk(KERN_ERR PFX "Cannot find power management"
" capability, aborting\n");
goto err_out_release;
bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (bp->pcie_cap == 0) {
printk(KERN_ERR PFX "Cannot find PCI Express capability,"
goto err_out_release;
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
bp->flags |= USING_DAC_FLAG;
if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
" failed, aborting\n");
goto err_out_release;
} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
printk(KERN_ERR PFX "System does not support DMA,"
goto err_out_release;
dev->mem_start = pci_resource_start(pdev, 0);
dev->base_addr = dev->mem_start;
dev->mem_end = pci_resource_end(pdev, 0);
dev->irq = pdev->irq;
bp->regview = ioremap_nocache(dev->base_addr,
pci_resource_len(pdev, 0));
if (!bp->regview) {
printk(KERN_ERR PFX "Cannot map register space, aborting\n");
goto err_out_release;
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
min_t(u64, BNX2X_DB_SIZE,
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
goto err_out_unmap;
bnx2x_set_power_state(bp, PCI_D0);
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
dev->hard_start_xmit = bnx2x_start_xmit;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &bnx2x_ethtool_ops;
dev->open = bnx2x_open;
dev->stop = bnx2x_close;
dev->set_multicast_list = bnx2x_set_rx_mode;
dev->set_mac_address = bnx2x_change_mac_addr;
dev->do_ioctl = bnx2x_ioctl;
dev->change_mtu = bnx2x_change_mtu;
dev->tx_timeout = bnx2x_tx_timeout;
dev->vlan_rx_register = bnx2x_vlan_rx_register;
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
dev->poll_controller = poll_bnx2x;
dev->features |= NETIF_F_SG;
dev->features |= NETIF_F_HW_CSUM;
if (bp->flags & USING_DAC_FLAG)
dev->features |= NETIF_F_HIGHDMA;
dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
dev->features |= NETIF_F_TSO6;
iounmap(bp->regview);
bp->regview = NULL;
if (bp->doorbells) {
iounmap(bp->doorbells);
bp->doorbells = NULL;
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
static int version_printed;
struct net_device *dev = NULL;
DECLARE_MAC_BUF(mac);
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
/* dev zeroed in init_etherdev */
dev = alloc_etherdev(sizeof(*bp));
printk(KERN_ERR PFX "Cannot allocate net device\n");
netif_carrier_off(dev);
bp = netdev_priv(dev);
bp->msglevel = debug;
rc = bnx2x_init_dev(pdev, dev);
rc = register_netdev(dev);
dev_err(&pdev->dev, "Cannot register net device\n");
goto init_one_exit;
pci_set_drvdata(pdev, dev);
rc = bnx2x_init_bp(bp);
unregister_netdev(dev);
goto init_one_exit;
bp->common.name = board_info[ent->driver_data].name;
printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
" IRQ %d, ", dev->name, bp->common.name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
bnx2x_get_pcie_width(bp),
(bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
dev->base_addr, bp->pdev->irq);
printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
iounmap(bp->regview);
iounmap(bp->doorbells);
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
bp = netdev_priv(dev);
unregister_netdev(dev);
iounmap(bp->regview);
iounmap(bp->doorbells);
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
struct net_device *dev = pci_get_drvdata(pdev);
printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
bp = netdev_priv(dev);
pci_save_state(pdev);
if (!netif_running(dev)) {
netif_device_detach(dev);
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

static int bnx2x_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
bp = netdev_priv(dev);
pci_restore_state(pdev);
if (!netif_running(dev)) {
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
rc = bnx2x_nic_load(bp, LOAD_OPEN);

* bnx2x_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
* This function is called after a PCI bus error affecting
* this device has been detected.
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
netif_device_detach(dev);
if (netif_running(dev))
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
pci_disable_device(pdev);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;

* bnx2x_io_slot_reset - called after the PCI bus has been reset
* @pdev: Pointer to PCI device
* Restart the card from scratch, as if from a cold-boot.
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
pci_set_master(pdev);
pci_restore_state(pdev);
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
return PCI_ERS_RESULT_RECOVERED;

* bnx2x_io_resume - called when traffic can start flowing again
* @pdev: Pointer to PCI device
* This callback is called when the error recovery driver tells us that
* it's OK to resume normal operation.
static void bnx2x_io_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
if (netif_running(dev))
bnx2x_nic_load(bp, LOAD_OPEN);
netif_device_attach(dev);

static struct pci_error_handlers bnx2x_err_handler = {
.error_detected = bnx2x_io_error_detected,
.slot_reset = bnx2x_io_slot_reset,
.resume = bnx2x_io_resume,

static struct pci_driver bnx2x_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bnx2x_pci_tbl,
.probe = bnx2x_init_one,
.remove = __devexit_p(bnx2x_remove_one),
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,

static int __init bnx2x_init(void)
return pci_register_driver(&bnx2x_pci_driver);

static void __exit bnx2x_cleanup(void)
pci_unregister_driver(&bnx2x_pci_driver);

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);