/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.6"
#define DRV_MODULE_RELDATE	"2008/06/23"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
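
/* The two accessors below go through the PCI config-space GRC window:
 * PCICFG_GRC_ADDRESS latches the target register, PCICFG_GRC_DATA moves the
 * value, and the window is parked back at PCICFG_VENDOR_ID_OFFSET afterwards,
 * presumably so stray config cycles cannot touch a live register.  This path
 * is also what bnx2x_write_dmae()/bnx2x_read_dmae() fall back to while
 * bp->dmae_ready is not yet set.
 */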
/*
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
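
/* bnx2x_write_dmae() below is the normal fast copy path once the engine is
 * up: it builds a PCI->GRC command whose completion address points at the
 * wb_comp word in the slowpath area, zeroes *wb_comp, posts the command via
 * bnx2x_post_dmae() and polls until the engine writes DMAE_COMP_VAL back,
 * giving up after ~200 retries (with a longer delay on emulation/FPGA).
 */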
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
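
/* The helpers below wrap "wide bus" 64-bit registers: REG_WR_DMAE and
 * REG_RD_DMAE move both 32-bit halves in a single DMAE transaction, which
 * appears to be the point of routing these through DMAE rather than issuing
 * two plain 32-bit register accesses.
 */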
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
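
/* bnx2x_fw_dump() below prints the MCP scratchpad as text: the word at
 * scratch offset 0xf104 ("mark") looks like an MCP-side pointer, and
 * subtracting 0x08000000 presumably rebases it into the scratchpad window.
 * The dump walks from mark to the end of the buffer, then wraps from 0xF108
 * back up to mark, 8 words (one NUL-terminated chunk) at a time.
 */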
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = 0;
		end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
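
/* Interrupt control sits in the HC (host coalescing) block, one config
 * register per port.  In MSI-X mode bnx2x_int_enable() clears the single-ISR
 * bit so each status block gets its own vector; in INT#A mode it sets
 * single-ISR together with the INT-line enable, and the register is written
 * twice: first with the MSI/MSI-X enable bit set, then with it cleared.
 */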
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/* fast path */

/*
 * General service functions
 */
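
/* Status block acknowledgement goes through the IGU: bnx2x_ack_sb() packs
 * the status block id, storm id, new index and the op/update flags into a
 * struct igu_ack_register and writes it as one u32 to the per-port HC
 * command register; the op code decides whether the same write leaves the
 * interrupt disabled or re-enables it.
 */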
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
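
/* Reading COMMAND_REG_SIMD_MASK returns the pending interrupt status as a
 * bitmask (the read itself serves as the ack, hence the function name above);
 * judging by bnx2x_interrupt() further down, bit 0 belongs to the slowpath
 * (default) status block and bit (sb_id + 1) to a fastpath status block,
 * hence the "0x2 << sb_id" test there.
 */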
/*
 * fast path service functions
 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
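
/* BD accounting sketch for bnx2x_free_tx_pkt() above: a packet occupies one
 * mapped "start" bd, an unmapped parse bd whenever any CSUM/LSO flag is set,
 * optionally an unmapped TSO split-header bd, and one mapped bd per page
 * fragment.  That is why the frag loop may only pci_unmap_page() after the
 * flagged bds have been skipped, and why the new consumer is simply
 * first_bd + nbd.
 */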
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
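
/* Each SGE ring entry maps a compound allocation of PAGES_PER_SGE pages
 * (BCM_PAGE_SIZE*PAGES_PER_SGE bytes).  TPA uses these to hold the
 * non-linear part of aggregated packets: bnx2x_fill_frag_skb() detaches a
 * page block into an skb fragment and immediately replenishes the ring slot
 * through bnx2x_alloc_rx_sge() above.
 */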
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
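
/* Mask bookkeeping example for bnx2x_update_sge_prod() above: sge_mask is an
 * array of u64 elements, one bit per SGE.  Once every bit of an element has
 * been cleared by completed SGL entries, the producer may jump forward by
 * RX_SGE_MASK_ELEM_SZ and the element is re-armed to all ones
 * (RX_SGE_MASK_ELEM_ONE_MASK); a single bit still set stops the scan, so
 * rx_sge_prod only ever advances over fully consumed elements.
 */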
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
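
/* TPA bin lifecycle: a bin (tpa_pool entry) normally sits in BNX2X_TPA_STOP
 * holding a spare skb.  bnx2x_tpa_start() above maps that spare skb onto the
 * ring producer slot and parks the partially filled ring skb in the bin;
 * bnx2x_tpa_stop() later completes the aggregation, hands the skb to the
 * stack and refills the bin, returning it to BNX2X_TPA_STOP.
 */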
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
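
/* The HW lock registers work as set/clear pairs: writing the resource bit to
 * hw_lock_control_reg + 4 tries to take the lock and reading the base
 * register back shows whether it stuck, while writing the bit to the base
 * register itself releases it.  Per the offset math above, functions 0-5 use
 * MISC_REG_DRIVER_CONTROL_1 + func*8 and functions 6-7 continue from
 * MISC_REG_DRIVER_CONTROL_7.
 */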
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		bp->link_params.mtu = bp->dev->mtu;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
     In the latter case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will
     be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	{
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	}
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
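
/* Worked example for the bandwidth conversion above: a raw min-BW field of
 * 25 gives vn_min_rate = 25 * 100 = 2500, i.e. the shared configuration
 * stores rates in units of 100 Mbps while the CMNG structures use Mbps.
 * With RS_PERIODIC_TIMEOUT_USEC at 100 (per the "100 usec in SDM ticks"
 * comment in bnx2x_init_port_minmax()), a 2500 Mbps vn_max_rate yields a
 * rate-shaping quota of 2500 * 100 / 8 = 31250 bytes per period.
 */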
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}
2274 static void bnx2x__link_status_update(struct bnx2x *bp)
2276 if (bp->state != BNX2X_STATE_OPEN)
2279 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2281 if (bp->link_vars.link_up)
2282 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2284 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286 /* indicate link status */
2287 bnx2x_link_report(bp);
2290 static void bnx2x_pmf_update(struct bnx2x *bp)
2292 int port = BP_PORT(bp);
2296 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2298 /* enable nig attention */
2299 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2303 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2311 * General service functions
2314 /* the slow path queue is odd since completions arrive on the fastpath ring */
2315 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316 u32 data_hi, u32 data_lo, int common)
2318 int func = BP_FUNC(bp);
2320 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2322 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2326 #ifdef BNX2X_STOP_ON_ERROR
2327 if (unlikely(bp->panic))
2331 spin_lock_bh(&bp->spq_lock);
2333 if (!bp->spq_left) {
2334 BNX2X_ERR("BUG! SPQ ring full!\n");
2335 spin_unlock_bh(&bp->spq_lock);
2340 /* CID needs port number to be encoded in it */
2341 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2344 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2346 bp->spq_prod_bd->hdr.type |=
2347 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2349 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2354 if (bp->spq_prod_bd == bp->spq_last_bd) {
2355 bp->spq_prod_bd = bp->spq;
2356 bp->spq_prod_idx = 0;
2357 DP(NETIF_MSG_TIMER, "end of spq\n");
2364 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2367 spin_unlock_bh(&bp->spq_lock);
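/* Usage sketch - this mirrors the statistics query posted later in this
 * file: the caller packs its ramrod payload into data_hi/data_lo and
 * posts on the ETH connection, e.g.
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * with the completion arriving on the fastpath ring, as noted above.
 */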
2371 /* acquire split MCP access lock register */
2372 static int bnx2x_acquire_alr(struct bnx2x *bp)
2379 for (j = 0; j < i*10; j++) {
2381 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383 if (val & (1L << 31))
2388 if (!(val & (1L << 31))) {
2389 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2396 /* release split MCP access lock register */
2397 static void bnx2x_release_alr(struct bnx2x *bp)
2401 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
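/* Sketch of the ALR handshake (as far as the visible code shows): the
 * register at GRCBASE_MCP + 0x9c is shared with the MCP firmware. The
 * acquirer writes bit 31 and polls the read-back until the bit reads as
 * set, i.e. ownership was granted; the release then presumably clears
 * the bit by writing the register again, letting the other side in.
 */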
2404 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2406 struct host_def_status_block *def_sb = bp->def_status_blk;
2409 barrier(); /* status block is written to by the chip */
2410 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2411 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2414 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2415 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2418 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2419 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2422 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2423 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2426 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2427 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2434 * slow path service functions
2437 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2439 int port = BP_PORT(bp);
2440 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2441 COMMAND_REG_ATTN_BITS_SET);
2442 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2443 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2444 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2445 NIG_REG_MASK_INTERRUPT_PORT0;
2448 if (bp->attn_state & asserted)
2449 BNX2X_ERR("IGU ERROR\n");
2451 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2452 aeu_mask = REG_RD(bp, aeu_addr);
2454 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2455 aeu_mask, asserted);
2456 aeu_mask &= ~(asserted & 0xff);
2457 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2459 REG_WR(bp, aeu_addr, aeu_mask);
2460 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2462 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2463 bp->attn_state |= asserted;
2464 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2466 if (asserted & ATTN_HARD_WIRED_MASK) {
2467 if (asserted & ATTN_NIG_FOR_FUNC) {
2469 /* save nig interrupt mask */
2470 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2471 REG_WR(bp, nig_int_mask_addr, 0);
2473 bnx2x_link_attn(bp);
2475 /* handle unicore attn? */
2477 if (asserted & ATTN_SW_TIMER_4_FUNC)
2478 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2480 if (asserted & GPIO_2_FUNC)
2481 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2483 if (asserted & GPIO_3_FUNC)
2484 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2486 if (asserted & GPIO_4_FUNC)
2487 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2490 if (asserted & ATTN_GENERAL_ATTN_1) {
2491 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2494 if (asserted & ATTN_GENERAL_ATTN_2) {
2495 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2498 if (asserted & ATTN_GENERAL_ATTN_3) {
2499 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2500 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2503 if (asserted & ATTN_GENERAL_ATTN_4) {
2504 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2505 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2507 if (asserted & ATTN_GENERAL_ATTN_5) {
2508 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2511 if (asserted & ATTN_GENERAL_ATTN_6) {
2512 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2513 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2517 } /* if hardwired */
2519 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2521 REG_WR(bp, hc_addr, asserted);
2523 /* now set back the mask */
2524 if (asserted & ATTN_NIG_FOR_FUNC)
2525 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2528 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2530 int port = BP_PORT(bp);
2534 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2535 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2537 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2539 val = REG_RD(bp, reg_offset);
2540 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2541 REG_WR(bp, reg_offset, val);
2543 BNX2X_ERR("SPIO5 hw attention\n");
2545 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2546 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2547 /* Fan failure attention */
2549 /* The PHY reset is controlled by GPIO 1 */
2550 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2551 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2552 /* Low power mode is controlled by GPIO 2 */
2553 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2554 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2555 /* mark the failure */
2556 bp->link_params.ext_phy_config &=
2557 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2558 bp->link_params.ext_phy_config |=
2559 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2561 dev_info.port_hw_config[port].
2562 external_phy_config,
2563 bp->link_params.ext_phy_config);
2564 /* log the failure */
2565 printk(KERN_ERR PFX "Fan Failure on Network"
2566 " Controller %s has caused the driver to"
2567 " shutdown the card to prevent permanent"
2568 " damage. Please contact Dell Support for"
2569 " assistance\n", bp->dev->name);
2577 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2579 val = REG_RD(bp, reg_offset);
2580 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2581 REG_WR(bp, reg_offset, val);
2583 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2584 (attn & HW_INTERRUT_ASSERT_SET_0));
2589 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2593 if (attn & BNX2X_DOORQ_ASSERT) {
2595 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2596 BNX2X_ERR("DB hw attention 0x%x\n", val);
2597 /* DORQ discard attention */
2599 BNX2X_ERR("FATAL error from DORQ\n");
2602 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2604 int port = BP_PORT(bp);
2607 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2608 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2610 val = REG_RD(bp, reg_offset);
2611 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2612 REG_WR(bp, reg_offset, val);
2614 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2615 (attn & HW_INTERRUT_ASSERT_SET_1));
2620 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2624 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2626 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2627 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2628 /* CFC error attention */
2630 BNX2X_ERR("FATAL error from CFC\n");
2633 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2635 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2636 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2637 /* RQ_USDMDP_FIFO_OVERFLOW */
2639 BNX2X_ERR("FATAL error from PXP\n");
2642 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2644 int port = BP_PORT(bp);
2647 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2648 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2650 val = REG_RD(bp, reg_offset);
2651 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2652 REG_WR(bp, reg_offset, val);
2654 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2655 (attn & HW_INTERRUT_ASSERT_SET_2));
2660 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2664 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2666 if (attn & BNX2X_PMF_LINK_ASSERT) {
2667 int func = BP_FUNC(bp);
2669 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2670 bnx2x__link_status_update(bp);
2671 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2673 bnx2x_pmf_update(bp);
2675 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2677 BNX2X_ERR("MC assert!\n");
2678 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2679 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2680 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2681 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2684 } else if (attn & BNX2X_MCP_ASSERT) {
2686 BNX2X_ERR("MCP assert!\n");
2687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2691 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2694 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2695 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2696 if (attn & BNX2X_GRC_TIMEOUT) {
2697 val = CHIP_IS_E1H(bp) ?
2698 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2699 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2701 if (attn & BNX2X_GRC_RSV) {
2702 val = CHIP_IS_E1H(bp) ?
2703 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2704 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2706 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2710 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2712 struct attn_route attn;
2713 struct attn_route group_mask;
2714 int port = BP_PORT(bp);
2720 /* need to take the HW lock because the MCP or the other port
2721 might also try to handle this event */
2722 bnx2x_acquire_alr(bp);
2724 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2725 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2726 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2727 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2728 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2729 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2731 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2732 if (deasserted & (1 << index)) {
2733 group_mask = bp->attn_group[index];
2735 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2736 index, group_mask.sig[0], group_mask.sig[1],
2737 group_mask.sig[2], group_mask.sig[3]);
2739 bnx2x_attn_int_deasserted3(bp,
2740 attn.sig[3] & group_mask.sig[3]);
2741 bnx2x_attn_int_deasserted1(bp,
2742 attn.sig[1] & group_mask.sig[1]);
2743 bnx2x_attn_int_deasserted2(bp,
2744 attn.sig[2] & group_mask.sig[2]);
2745 bnx2x_attn_int_deasserted0(bp,
2746 attn.sig[0] & group_mask.sig[0]);
2748 if ((attn.sig[0] & group_mask.sig[0] &
2749 HW_PRTY_ASSERT_SET_0) ||
2750 (attn.sig[1] & group_mask.sig[1] &
2751 HW_PRTY_ASSERT_SET_1) ||
2752 (attn.sig[2] & group_mask.sig[2] &
2753 HW_PRTY_ASSERT_SET_2))
2754 BNX2X_ERR("FATAL HW block parity attention\n");
2758 bnx2x_release_alr(bp);
2760 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2763 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2765 REG_WR(bp, reg_addr, val);
2767 if (~bp->attn_state & deasserted)
2768 BNX2X_ERR("IGU ERROR\n");
2770 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2771 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774 aeu_mask = REG_RD(bp, reg_addr);
2776 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2777 aeu_mask, deasserted);
2778 aeu_mask |= (deasserted & 0xff);
2779 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2781 REG_WR(bp, reg_addr, aeu_mask);
2782 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2784 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2785 bp->attn_state &= ~deasserted;
2786 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2789 static void bnx2x_attn_int(struct bnx2x *bp)
2791 /* read local copy of bits */
2792 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2793 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2794 u32 attn_state = bp->attn_state;
2796 /* look for changed bits */
2797 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2798 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2801 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2802 attn_bits, attn_ack, asserted, deasserted);
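/* Bit-level example of the masks above (illustrative values): with
 * attn_bits = 0101b, attn_ack = 0011b and attn_state = 0011b, bit 2 is
 * newly raised (set in attn_bits, not yet acked or recorded) so
 * asserted = 0100b, while bit 1 has dropped (clear in attn_bits but
 * still acked and recorded) so deasserted = 0010b. Bit 0 is stable
 * and shows up in neither mask.
 */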
2804 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2805 BNX2X_ERR("BAD attention state\n");
2807 /* handle bits that were raised */
2809 bnx2x_attn_int_asserted(bp, asserted);
2812 bnx2x_attn_int_deasserted(bp, deasserted);
2815 static void bnx2x_sp_task(struct work_struct *work)
2817 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2821 /* Return here if interrupt is disabled */
2822 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2823 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2827 status = bnx2x_update_dsb_idx(bp);
2828 /* if (status == 0) */
2829 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2831 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2837 /* CStorm events: query_stats, port delete ramrod */
2839 bp->stats_pending = 0;
2841 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2843 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2845 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2847 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2849 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2854 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2856 struct net_device *dev = dev_instance;
2857 struct bnx2x *bp = netdev_priv(dev);
2859 /* Return here if interrupt is disabled */
2860 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2861 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2865 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2867 #ifdef BNX2X_STOP_ON_ERROR
2868 if (unlikely(bp->panic))
2872 schedule_work(&bp->sp_task);
2877 /* end of slow path */
2881 /****************************************************************************
2883 ****************************************************************************/
2885 /* sum[hi:lo] += add[hi:lo] */
2886 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2889 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2892 /* difference = minuend - subtrahend */
2893 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2895 if (m_lo < s_lo) { \
2897 d_hi = m_hi - s_hi; \
2899 /* we can 'loan' 1 */ \
2901 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2903 /* m_hi <= s_hi */ \
2908 /* m_lo >= s_lo */ \
2909 if (m_hi < s_hi) { \
2913 /* m_hi >= s_hi */ \
2914 d_hi = m_hi - s_hi; \
2915 d_lo = m_lo - s_lo; \
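/* Worked example of the borrow path in DIFF_64 (illustrative values):
 * minuend = 0x00000002_00000001, subtrahend = 0x00000001_00000003.
 * Here m_lo < s_lo and m_hi > s_hi, so we loan 1 from the high word:
 * d_hi = 2 - 1 - 1 = 0 and d_lo = 1 + (UINT_MAX - 3) + 1 = 0xFFFFFFFE,
 * which is the correct 64-bit difference 0x00000000_FFFFFFFE.
 */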
2920 #define UPDATE_STAT64(s, t) \
2922 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2923 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2924 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2925 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2926 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2927 pstats->mac_stx[1].t##_lo, diff.lo); \
2930 #define UPDATE_STAT64_NIG(s, t) \
2932 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2933 diff.lo, new->s##_lo, old->s##_lo); \
2934 ADD_64(estats->t##_hi, diff.hi, \
2935 estats->t##_lo, diff.lo); \
2938 /* sum[hi:lo] += add */
2939 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2942 s_hi += (s_lo < a) ? 1 : 0; \
2945 #define UPDATE_EXTEND_STAT(s) \
2947 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2948 pstats->mac_stx[1].s##_lo, \
2952 #define UPDATE_EXTEND_TSTAT(s, t) \
2954 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2955 old_tclient->s = le32_to_cpu(tclient->s); \
2956 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2959 #define UPDATE_EXTEND_XSTAT(s, t) \
2961 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2962 old_xclient->s = le32_to_cpu(xclient->s); \
2963 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2967 * General service functions
2970 static inline long bnx2x_hilo(u32 *hiref)
2972 u32 lo = *(hiref + 1);
2973 #if (BITS_PER_LONG == 64)
2976 return HILO_U64(hi, lo);
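/* Note on the layout assumed here: each 64-bit statistic is stored as
 * consecutive u32 words with the _hi word first, so hiref points at the
 * high half and hiref + 1 at the low half. On 64-bit kernels the two
 * are folded, e.g. hi = 0x1, lo = 0x2 yields 0x100000002; on 32-bit
 * only the low word can be returned in a long.
 */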
2983 * Init service functions
2986 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2988 if (!bp->stats_pending) {
2989 struct eth_query_ramrod_data ramrod_data = {0};
2992 ramrod_data.drv_counter = bp->stats_counter++;
2993 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2994 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2996 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2997 ((u32 *)&ramrod_data)[1],
2998 ((u32 *)&ramrod_data)[0], 0);
3000 /* stats ramrod has its own slot on the spq */
3002 bp->stats_pending = 1;
3007 static void bnx2x_stats_init(struct bnx2x *bp)
3009 int port = BP_PORT(bp);
3011 bp->executer_idx = 0;
3012 bp->stats_counter = 0;
3016 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3018 bp->port.port_stx = 0;
3019 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3021 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3022 bp->port.old_nig_stats.brb_discard =
3023 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3024 bp->port.old_nig_stats.brb_truncate =
3025 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3026 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3027 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3028 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3029 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3031 /* function stats */
3032 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3033 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3034 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3035 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3037 bp->stats_state = STATS_STATE_DISABLED;
3038 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3039 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3042 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3044 struct dmae_command *dmae = &bp->stats_dmae;
3045 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3047 *stats_comp = DMAE_COMP_VAL;
3050 if (bp->executer_idx) {
3051 int loader_idx = PMF_DMAE_C(bp);
3053 memset(dmae, 0, sizeof(struct dmae_command));
3055 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3056 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3057 DMAE_CMD_DST_RESET |
3059 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3061 DMAE_CMD_ENDIANITY_DW_SWAP |
3063 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3065 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3066 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3067 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3068 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3069 sizeof(struct dmae_command) *
3070 (loader_idx + 1)) >> 2;
3071 dmae->dst_addr_hi = 0;
3072 dmae->len = sizeof(struct dmae_command) >> 2;
3075 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3076 dmae->comp_addr_hi = 0;
3080 bnx2x_post_dmae(bp, dmae, loader_idx);
3082 } else if (bp->func_stx) {
3084 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3088 static int bnx2x_stats_comp(struct bnx2x *bp)
3090 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3094 while (*stats_comp != DMAE_COMP_VAL) {
3096 BNX2X_ERR("timeout waiting for stats finished\n");
3106 * Statistics service functions
3109 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3111 struct dmae_command *dmae;
3113 int loader_idx = PMF_DMAE_C(bp);
3114 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3117 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3118 BNX2X_ERR("BUG!\n");
3122 bp->executer_idx = 0;
3124 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3126 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3128 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3130 DMAE_CMD_ENDIANITY_DW_SWAP |
3132 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3133 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3135 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3136 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3137 dmae->src_addr_lo = bp->port.port_stx >> 2;
3138 dmae->src_addr_hi = 0;
3139 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3140 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3141 dmae->len = DMAE_LEN32_RD_MAX;
3142 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3143 dmae->comp_addr_hi = 0;
3146 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3147 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3148 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3149 dmae->src_addr_hi = 0;
3150 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3151 DMAE_LEN32_RD_MAX * 4);
3152 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3153 DMAE_LEN32_RD_MAX * 4);
3154 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3155 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3156 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3157 dmae->comp_val = DMAE_COMP_VAL;
3160 bnx2x_hw_stats_post(bp);
3161 bnx2x_stats_comp(bp);
3164 static void bnx2x_port_stats_init(struct bnx2x *bp)
3166 struct dmae_command *dmae;
3167 int port = BP_PORT(bp);
3168 int vn = BP_E1HVN(bp);
3170 int loader_idx = PMF_DMAE_C(bp);
3172 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3175 if (!bp->link_vars.link_up || !bp->port.pmf) {
3176 BNX2X_ERR("BUG!\n");
3180 bp->executer_idx = 0;
3183 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3184 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3185 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3187 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3189 DMAE_CMD_ENDIANITY_DW_SWAP |
3191 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3192 (vn << DMAE_CMD_E1HVN_SHIFT));
3194 if (bp->port.port_stx) {
3196 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3197 dmae->opcode = opcode;
3198 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3199 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3200 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3201 dmae->dst_addr_hi = 0;
3202 dmae->len = sizeof(struct host_port_stats) >> 2;
3203 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3204 dmae->comp_addr_hi = 0;
3210 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3211 dmae->opcode = opcode;
3212 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3213 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3214 dmae->dst_addr_lo = bp->func_stx >> 2;
3215 dmae->dst_addr_hi = 0;
3216 dmae->len = sizeof(struct host_func_stats) >> 2;
3217 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3218 dmae->comp_addr_hi = 0;
3223 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3224 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3225 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3227 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3229 DMAE_CMD_ENDIANITY_DW_SWAP |
3231 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3232 (vn << DMAE_CMD_E1HVN_SHIFT));
3234 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3236 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3237 NIG_REG_INGRESS_BMAC0_MEM);
3239 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3240 BIGMAC_REGISTER_TX_STAT_GTBYT */
3241 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242 dmae->opcode = opcode;
3243 dmae->src_addr_lo = (mac_addr +
3244 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3245 dmae->src_addr_hi = 0;
3246 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3247 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3248 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3249 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3250 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3251 dmae->comp_addr_hi = 0;
3254 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3255 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3256 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257 dmae->opcode = opcode;
3258 dmae->src_addr_lo = (mac_addr +
3259 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3260 dmae->src_addr_hi = 0;
3261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3262 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3263 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3264 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3265 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3266 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3268 dmae->comp_addr_hi = 0;
3271 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3273 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3275 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3276 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3277 dmae->opcode = opcode;
3278 dmae->src_addr_lo = (mac_addr +
3279 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3280 dmae->src_addr_hi = 0;
3281 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3282 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3283 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3284 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285 dmae->comp_addr_hi = 0;
3288 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3289 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3290 dmae->opcode = opcode;
3291 dmae->src_addr_lo = (mac_addr +
3292 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3293 dmae->src_addr_hi = 0;
3294 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3295 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3296 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3297 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3299 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3300 dmae->comp_addr_hi = 0;
3303 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305 dmae->opcode = opcode;
3306 dmae->src_addr_lo = (mac_addr +
3307 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3308 dmae->src_addr_hi = 0;
3309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3310 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3311 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3312 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3313 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3314 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315 dmae->comp_addr_hi = 0;
3320 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321 dmae->opcode = opcode;
3322 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3323 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3324 dmae->src_addr_hi = 0;
3325 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3327 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3332 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3333 dmae->opcode = opcode;
3334 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3335 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3336 dmae->src_addr_hi = 0;
3337 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3338 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3339 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3340 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3341 dmae->len = (2*sizeof(u32)) >> 2;
3342 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3343 dmae->comp_addr_hi = 0;
3346 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3348 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3349 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3351 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3353 DMAE_CMD_ENDIANITY_DW_SWAP |
3355 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3356 (vn << DMAE_CMD_E1HVN_SHIFT));
3357 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3358 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3359 dmae->src_addr_hi = 0;
3360 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3361 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3362 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3363 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3364 dmae->len = (2*sizeof(u32)) >> 2;
3365 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3366 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3367 dmae->comp_val = DMAE_COMP_VAL;
3372 static void bnx2x_func_stats_init(struct bnx2x *bp)
3374 struct dmae_command *dmae = &bp->stats_dmae;
3375 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3378 if (!bp->func_stx) {
3379 BNX2X_ERR("BUG!\n");
3383 bp->executer_idx = 0;
3384 memset(dmae, 0, sizeof(struct dmae_command));
3386 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3387 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3388 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3390 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3392 DMAE_CMD_ENDIANITY_DW_SWAP |
3394 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3395 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3396 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3397 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3398 dmae->dst_addr_lo = bp->func_stx >> 2;
3399 dmae->dst_addr_hi = 0;
3400 dmae->len = sizeof(struct host_func_stats) >> 2;
3401 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3402 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3403 dmae->comp_val = DMAE_COMP_VAL;
3408 static void bnx2x_stats_start(struct bnx2x *bp)
3411 bnx2x_port_stats_init(bp);
3413 else if (bp->func_stx)
3414 bnx2x_func_stats_init(bp);
3416 bnx2x_hw_stats_post(bp);
3417 bnx2x_storm_stats_post(bp);
3420 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3422 bnx2x_stats_comp(bp);
3423 bnx2x_stats_pmf_update(bp);
3424 bnx2x_stats_start(bp);
3427 static void bnx2x_stats_restart(struct bnx2x *bp)
3429 bnx2x_stats_comp(bp);
3430 bnx2x_stats_start(bp);
3433 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3435 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3436 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3437 struct regpair diff;
3439 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3440 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3441 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3442 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3443 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3444 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3445 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3446 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3447 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3448 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3449 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3450 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3451 UPDATE_STAT64(tx_stat_gt127,
3452 tx_stat_etherstatspkts65octetsto127octets);
3453 UPDATE_STAT64(tx_stat_gt255,
3454 tx_stat_etherstatspkts128octetsto255octets);
3455 UPDATE_STAT64(tx_stat_gt511,
3456 tx_stat_etherstatspkts256octetsto511octets);
3457 UPDATE_STAT64(tx_stat_gt1023,
3458 tx_stat_etherstatspkts512octetsto1023octets);
3459 UPDATE_STAT64(tx_stat_gt1518,
3460 tx_stat_etherstatspkts1024octetsto1522octets);
3461 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3462 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3463 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3464 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3465 UPDATE_STAT64(tx_stat_gterr,
3466 tx_stat_dot3statsinternalmactransmiterrors);
3467 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3470 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3472 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3473 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3475 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3476 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3477 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3478 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3479 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3480 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3481 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3482 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3483 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3484 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3485 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3486 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3487 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3488 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3489 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3490 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3491 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3492 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3493 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3494 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3495 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3496 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3497 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3498 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3499 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3500 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3501 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3502 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3503 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3504 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3508 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3510 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3511 struct nig_stats *old = &(bp->port.old_nig_stats);
3512 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3513 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3514 struct regpair diff;
3516 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3517 bnx2x_bmac_stats_update(bp);
3519 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3520 bnx2x_emac_stats_update(bp);
3522 else { /* unreached */
3523 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3527 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3528 new->brb_discard - old->brb_discard);
3529 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3530 new->brb_truncate - old->brb_truncate);
3532 UPDATE_STAT64_NIG(egress_mac_pkt0,
3533 etherstatspkts1024octetsto1522octets);
3534 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3536 memcpy(old, new, sizeof(struct nig_stats));
3538 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3539 sizeof(struct mac_stx));
3540 estats->brb_drop_hi = pstats->brb_drop_hi;
3541 estats->brb_drop_lo = pstats->brb_drop_lo;
3543 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3548 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3550 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3551 int cl_id = BP_CL_ID(bp);
3552 struct tstorm_per_port_stats *tport =
3553 &stats->tstorm_common.port_statistics;
3554 struct tstorm_per_client_stats *tclient =
3555 &stats->tstorm_common.client_statistics[cl_id];
3556 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3557 struct xstorm_per_client_stats *xclient =
3558 &stats->xstorm_common.client_statistics[cl_id];
3559 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3560 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3561 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3564 /* are storm stats valid? */
3565 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3566 bp->stats_counter) {
3567 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3568 " tstorm counter (%d) != stats_counter (%d)\n",
3569 tclient->stats_counter, bp->stats_counter);
3572 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3573 bp->stats_counter) {
3574 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3575 " xstorm counter (%d) != stats_counter (%d)\n",
3576 xclient->stats_counter, bp->stats_counter);
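/* Freshness check, sketched: bnx2x_storm_stats_post() stamps each query
 * with drv_counter = bp->stats_counter++ and the storms echo that stamp
 * back, so a fresh block satisfies stats_counter + 1 == bp->stats_counter.
 * E.g. with bp->stats_counter = 5 the last query carried 4, and any
 * other echoed value is treated as stale; the u16 cast keeps the test
 * correct across counter wrap-around.
 */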
3580 fstats->total_bytes_received_hi =
3581 fstats->valid_bytes_received_hi =
3582 le32_to_cpu(tclient->total_rcv_bytes.hi);
3583 fstats->total_bytes_received_lo =
3584 fstats->valid_bytes_received_lo =
3585 le32_to_cpu(tclient->total_rcv_bytes.lo);
3587 estats->error_bytes_received_hi =
3588 le32_to_cpu(tclient->rcv_error_bytes.hi);
3589 estats->error_bytes_received_lo =
3590 le32_to_cpu(tclient->rcv_error_bytes.lo);
3591 ADD_64(estats->error_bytes_received_hi,
3592 estats->rx_stat_ifhcinbadoctets_hi,
3593 estats->error_bytes_received_lo,
3594 estats->rx_stat_ifhcinbadoctets_lo);
3596 ADD_64(fstats->total_bytes_received_hi,
3597 estats->error_bytes_received_hi,
3598 fstats->total_bytes_received_lo,
3599 estats->error_bytes_received_lo);
3601 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3602 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3603 total_multicast_packets_received);
3604 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3605 total_broadcast_packets_received);
3607 fstats->total_bytes_transmitted_hi =
3608 le32_to_cpu(xclient->total_sent_bytes.hi);
3609 fstats->total_bytes_transmitted_lo =
3610 le32_to_cpu(xclient->total_sent_bytes.lo);
3612 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3613 total_unicast_packets_transmitted);
3614 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3615 total_multicast_packets_transmitted);
3616 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3617 total_broadcast_packets_transmitted);
3619 memcpy(estats, &(fstats->total_bytes_received_hi),
3620 sizeof(struct host_func_stats) - 2*sizeof(u32));
3622 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3623 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3624 estats->brb_truncate_discard =
3625 le32_to_cpu(tport->brb_truncate_discard);
3626 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3628 old_tclient->rcv_unicast_bytes.hi =
3629 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3630 old_tclient->rcv_unicast_bytes.lo =
3631 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3632 old_tclient->rcv_broadcast_bytes.hi =
3633 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3634 old_tclient->rcv_broadcast_bytes.lo =
3635 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3636 old_tclient->rcv_multicast_bytes.hi =
3637 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3638 old_tclient->rcv_multicast_bytes.lo =
3639 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3640 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3642 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3643 old_tclient->packets_too_big_discard =
3644 le32_to_cpu(tclient->packets_too_big_discard);
3645 estats->no_buff_discard =
3646 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3647 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3649 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3650 old_xclient->unicast_bytes_sent.hi =
3651 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3652 old_xclient->unicast_bytes_sent.lo =
3653 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3654 old_xclient->multicast_bytes_sent.hi =
3655 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3656 old_xclient->multicast_bytes_sent.lo =
3657 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3658 old_xclient->broadcast_bytes_sent.hi =
3659 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3660 old_xclient->broadcast_bytes_sent.lo =
3661 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3663 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3668 static void bnx2x_net_stats_update(struct bnx2x *bp)
3670 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3671 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3672 struct net_device_stats *nstats = &bp->dev->stats;
3674 nstats->rx_packets =
3675 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3676 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3677 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3679 nstats->tx_packets =
3680 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3681 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3682 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3684 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3686 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3688 nstats->rx_dropped = old_tclient->checksum_discard +
3689 estats->mac_discard;
3690 nstats->tx_dropped = 0;
3693 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3695 nstats->collisions =
3696 estats->tx_stat_dot3statssinglecollisionframes_lo +
3697 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3698 estats->tx_stat_dot3statslatecollisions_lo +
3699 estats->tx_stat_dot3statsexcessivecollisions_lo;
3701 estats->jabber_packets_received =
3702 old_tclient->packets_too_big_discard +
3703 estats->rx_stat_dot3statsframestoolong_lo;
3705 nstats->rx_length_errors =
3706 estats->rx_stat_etherstatsundersizepkts_lo +
3707 estats->jabber_packets_received;
3708 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3709 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3710 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3711 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3712 nstats->rx_missed_errors = estats->xxoverflow_discard;
3714 nstats->rx_errors = nstats->rx_length_errors +
3715 nstats->rx_over_errors +
3716 nstats->rx_crc_errors +
3717 nstats->rx_frame_errors +
3718 nstats->rx_fifo_errors +
3719 nstats->rx_missed_errors;
3721 nstats->tx_aborted_errors =
3722 estats->tx_stat_dot3statslatecollisions_lo +
3723 estats->tx_stat_dot3statsexcessivecollisions_lo;
3724 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3725 nstats->tx_fifo_errors = 0;
3726 nstats->tx_heartbeat_errors = 0;
3727 nstats->tx_window_errors = 0;
3729 nstats->tx_errors = nstats->tx_aborted_errors +
3730 nstats->tx_carrier_errors;
3733 static void bnx2x_stats_update(struct bnx2x *bp)
3735 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3738 if (*stats_comp != DMAE_COMP_VAL)
3742 update = (bnx2x_hw_stats_update(bp) == 0);
3744 update |= (bnx2x_storm_stats_update(bp) == 0);
3747 bnx2x_net_stats_update(bp);
3750 if (bp->stats_pending) {
3751 bp->stats_pending++;
3752 if (bp->stats_pending == 3) {
3753 BNX2X_ERR("storm stats were not updated for 3 times\n");
3760 if (bp->msglevel & NETIF_MSG_TIMER) {
3761 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3762 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3763 struct net_device_stats *nstats = &bp->dev->stats;
3766 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3767 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3769 bnx2x_tx_avail(bp->fp),
3770 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3771 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3773 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3774 bp->fp->rx_comp_cons),
3775 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3776 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3777 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3778 estats->driver_xoff, estats->brb_drop_lo);
3779 printk(KERN_DEBUG "tstats: checksum_discard %u "
3780 "packets_too_big_discard %u no_buff_discard %u "
3781 "mac_discard %u mac_filter_discard %u "
3782 "xxovrflow_discard %u brb_truncate_discard %u "
3783 "ttl0_discard %u\n",
3784 old_tclient->checksum_discard,
3785 old_tclient->packets_too_big_discard,
3786 old_tclient->no_buff_discard, estats->mac_discard,
3787 estats->mac_filter_discard, estats->xxoverflow_discard,
3788 estats->brb_truncate_discard,
3789 old_tclient->ttl0_discard);
3791 for_each_queue(bp, i) {
3792 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3793 bnx2x_fp(bp, i, tx_pkt),
3794 bnx2x_fp(bp, i, rx_pkt),
3795 bnx2x_fp(bp, i, rx_calls));
3799 bnx2x_hw_stats_post(bp);
3800 bnx2x_storm_stats_post(bp);
3803 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3805 struct dmae_command *dmae;
3807 int loader_idx = PMF_DMAE_C(bp);
3808 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3810 bp->executer_idx = 0;
3812 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3814 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3816 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3818 DMAE_CMD_ENDIANITY_DW_SWAP |
3820 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3821 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3823 if (bp->port.port_stx) {
3825 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3827 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3829 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3830 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3831 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3832 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3833 dmae->dst_addr_hi = 0;
3834 dmae->len = sizeof(struct host_port_stats) >> 2;
3836 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3837 dmae->comp_addr_hi = 0;
3840 dmae->comp_addr_lo =
3841 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3842 dmae->comp_addr_hi =
3843 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3844 dmae->comp_val = DMAE_COMP_VAL;
3852 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3853 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3854 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3855 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3856 dmae->dst_addr_lo = bp->func_stx >> 2;
3857 dmae->dst_addr_hi = 0;
3858 dmae->len = sizeof(struct host_func_stats) >> 2;
3859 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3860 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3861 dmae->comp_val = DMAE_COMP_VAL;
3867 static void bnx2x_stats_stop(struct bnx2x *bp)
3871 bnx2x_stats_comp(bp);
3874 update = (bnx2x_hw_stats_update(bp) == 0);
3876 update |= (bnx2x_storm_stats_update(bp) == 0);
3879 bnx2x_net_stats_update(bp);
3882 bnx2x_port_stats_stop(bp);
3884 bnx2x_hw_stats_post(bp);
3885 bnx2x_stats_comp(bp);
3889 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3893 static const struct {
3894 void (*action)(struct bnx2x *bp);
3895 enum bnx2x_stats_state next_state;
3896 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3899 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3900 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3901 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3902 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3905 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3906 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3907 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3908 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3912 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3914 enum bnx2x_stats_state state = bp->stats_state;
3916 bnx2x_stats_stm[state][event].action(bp);
3917 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3919 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3920 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3921 state, event, bp->stats_state);
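/* Example walk through the state machine above: starting DISABLED, a
 * LINK_UP event runs bnx2x_stats_start() and moves to ENABLED; periodic
 * UPDATE events then run bnx2x_stats_update() and stay in ENABLED; a
 * STOP event runs bnx2x_stats_stop() and drops back to DISABLED, where
 * further UPDATE events fall through to bnx2x_stats_do_nothing().
 */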
3924 static void bnx2x_timer(unsigned long data)
3926 struct bnx2x *bp = (struct bnx2x *) data;
3928 if (!netif_running(bp->dev))
3931 if (atomic_read(&bp->intr_sem) != 0)
3935 struct bnx2x_fastpath *fp = &bp->fp[0];
3938 bnx2x_tx_int(fp, 1000);
3939 rc = bnx2x_rx_int(fp, 1000);
3942 if (!BP_NOMCP(bp)) {
3943 int func = BP_FUNC(bp);
3947 ++bp->fw_drv_pulse_wr_seq;
3948 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3949 /* TBD - add SYSTEM_TIME */
3950 drv_pulse = bp->fw_drv_pulse_wr_seq;
3951 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3953 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3954 MCP_PULSE_SEQ_MASK);
3955 /* The delta between driver pulse and mcp response
3956 * should be 1 (before mcp response) or 0 (after mcp response)
3958 if ((drv_pulse != mcp_pulse) &&
3959 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3960 /* someone lost a heartbeat... */
3961 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3962 drv_pulse, mcp_pulse);
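/* Illustrative pulse exchange: the driver writes drv_pulse = 0x12; until
 * the MCP answers, mcp_pulse still reads 0x11 (delta 1), and once the
 * MCP copies the pulse back it reads 0x12 (delta 0) - both are fine.
 * Anything else, say mcp_pulse = 0x0f, means a heartbeat was lost and
 * trips the error above.
 */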
3966 if ((bp->state == BNX2X_STATE_OPEN) ||
3967 (bp->state == BNX2X_STATE_DISABLED))
3968 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3971 mod_timer(&bp->timer, jiffies + bp->current_interval);
3974 /* end of Statistics */
3979 * nic init service functions
3982 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3984 int port = BP_PORT(bp);
3986 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3987 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3988 sizeof(struct ustorm_def_status_block)/4);
3989 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3990 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3991 sizeof(struct cstorm_def_status_block)/4);
3994 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
3995 dma_addr_t mapping, int sb_id)
3997 int port = BP_PORT(bp);
3998 int func = BP_FUNC(bp);
4003 section = ((u64)mapping) + offsetof(struct host_status_block,
4005 sb->u_status_block.status_block_id = sb_id;
4007 REG_WR(bp, BAR_USTRORM_INTMEM +
4008 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4009 REG_WR(bp, BAR_USTRORM_INTMEM +
4010 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4012 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4013 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4015 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4016 REG_WR16(bp, BAR_USTRORM_INTMEM +
4017 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4020 section = ((u64)mapping) + offsetof(struct host_status_block,
4022 sb->c_status_block.status_block_id = sb_id;
4024 REG_WR(bp, BAR_CSTRORM_INTMEM +
4025 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4026 REG_WR(bp, BAR_CSTRORM_INTMEM +
4027 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4029 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4030 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4032 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4033 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4034 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4036 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4039 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4041 int func = BP_FUNC(bp);
4043 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4044 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4045 sizeof(struct ustorm_def_status_block)/4);
4046 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4047 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4048 sizeof(struct cstorm_def_status_block)/4);
4049 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4050 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4051 sizeof(struct xstorm_def_status_block)/4);
4052 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4053 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054 sizeof(struct tstorm_def_status_block)/4);
4057 static void bnx2x_init_def_sb(struct bnx2x *bp,
4058 struct host_def_status_block *def_sb,
                              dma_addr_t mapping, int sb_id)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int index, val, reg_offset;
        u64 section;

        /* ATTN */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            atten_status_block);
        def_sb->atten_status_block.status_block_id = sb_id;

        bp->attn_state = 0;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

        for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
                bp->attn_group[index].sig[0] = REG_RD(bp,
                                                      reg_offset + 0x10*index);
                bp->attn_group[index].sig[1] = REG_RD(bp,
                                                reg_offset + 0x4 + 0x10*index);
                bp->attn_group[index].sig[2] = REG_RD(bp,
                                                reg_offset + 0x8 + 0x10*index);
                bp->attn_group[index].sig[3] = REG_RD(bp,
                                                reg_offset + 0xc + 0x10*index);
        }

        reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
                             HC_REG_ATTN_MSG0_ADDR_L);

        REG_WR(bp, reg_offset, U64_LO(section));
        REG_WR(bp, reg_offset + 4, U64_HI(section));

        reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

        val = REG_RD(bp, reg_offset);
        val |= sb_id;
        REG_WR(bp, reg_offset, val);

        /* USTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            u_def_status_block);
        def_sb->u_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_USTRORM_INTMEM +
               ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
                USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        /* CSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            c_def_status_block);
        def_sb->c_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
                CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        /* TSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            t_def_status_block);
        def_sb->t_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
                TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_TSTRORM_INTMEM +
                         TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        /* XSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            x_def_status_block);
        def_sb->x_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
                XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_XSTRORM_INTMEM +
                         XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        bp->stats_pending = 0;
        bp->set_mac_pending = 0;

        bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
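
/*
 * Note on bnx2x_init_def_sb(): the default status block is carved into
 * one section per storm (attention, U, C, T, X).  For each section the
 * host DMA address is published to that storm's internal memory, and
 * every HC_DISABLE word is then set to 1, i.e. all indices start with
 * host coalescing disabled until bnx2x_update_coalesce() re-enables the
 * ones that are actually used.
 */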

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int i;

        for_each_queue(bp, i) {
                int sb_id = bp->fp[i].sb_id;

                /* HC_INDEX_U_ETH_RX_CQ_CONS */
                REG_WR8(bp, BAR_USTRORM_INTMEM +
                        USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
                                                    U_SB_ETH_RX_CQ_INDEX),
                        bp->rx_ticks/12);
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
                                                     U_SB_ETH_RX_CQ_INDEX),
                         bp->rx_ticks ? 0 : 1);
                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
                                                     U_SB_ETH_RX_BD_INDEX),
                         bp->rx_ticks ? 0 : 1);

                /* HC_INDEX_C_ETH_TX_CQ_CONS */
                REG_WR8(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
                                                    C_SB_ETH_TX_CQ_INDEX),
                        bp->tx_ticks/12);
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
                                                     C_SB_ETH_TX_CQ_INDEX),
                         bp->tx_ticks ? 0 : 1);
        }
}
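
/*
 * rx_ticks/tx_ticks come from the ethtool coalesce settings; a value of
 * 0 programs the matching HC_DISABLE word to 1, so that index never
 * arms a coalescing timer.  The /12 scaling of the timeout byte above
 * assumes the HC timeout counts in units of roughly 12us; treat that
 * divisor as an assumption rather than a datasheet fact.
 */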

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                                       struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++) {
                struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
                struct sk_buff *skb = rx_buf->skb;

                if (skb == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }

                if (fp->tpa_state[i] == BNX2X_TPA_START)
                        pci_unmap_single(bp->pdev,
                                         pci_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);

                dev_kfree_skb(skb);
                rx_buf->skb = NULL;
        }
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
                                              ETH_MAX_AGGREGATION_QUEUES_E1H;
        u16 ring_prod, cqe_ring_prod;
        int i, j;

        bp->rx_buf_use_size = bp->dev->mtu;
        bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
        bp->rx_buf_size = bp->rx_buf_use_size + 64;

        if (bp->flags & TPA_ENABLE_FLAG) {
                DP(NETIF_MSG_IFUP,
                   "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
                   bp->rx_buf_use_size, bp->rx_buf_size,
                   bp->dev->mtu + ETH_OVREHEAD);

                for_each_queue(bp, j) {
                        struct bnx2x_fastpath *fp = &bp->fp[j];

                        for (i = 0; i < max_agg_queues; i++) {
                                fp->tpa_pool[i].skb =
                                   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
                                if (!fp->tpa_pool[i].skb) {
                                        BNX2X_ERR("Failed to allocate TPA "
                                                  "skb pool for queue[%d] - "
                                                  "disabling TPA on this "
                                                  "queue!\n", j);
                                        bnx2x_free_tpa_pool(bp, fp, i);
                                        fp->disable_tpa = 1;
                                        break;
                                }
                                pci_unmap_addr_set((struct sw_rx_bd *)
                                                        &bp->fp->tpa_pool[i],
                                                   mapping, 0);
                                fp->tpa_state[i] = BNX2X_TPA_STOP;
                        }
                }
        }

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                fp->rx_bd_cons = 0;
                fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
                fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

                /* "next page" elements initialization */
                /* SGE ring */
                for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                        struct eth_rx_sge *sge;

                        sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                        sge->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
                        sge->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
                }

                bnx2x_init_sge_ring_bit_mask(fp);

                /* RX BD ring */
                for (i = 1; i <= NUM_RX_RINGS; i++) {
                        struct eth_rx_bd *rx_bd;

                        rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                        rx_bd->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                        rx_bd->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                }

                /* CQ ring */
                for (i = 1; i <= NUM_RCQ_RINGS; i++) {
                        struct eth_rx_cqe_next_page *nextpg;

                        nextpg = (struct eth_rx_cqe_next_page *)
                                &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
                        nextpg->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_comp_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                        nextpg->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_comp_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                }

                /* Allocate SGEs and initialize the ring elements */
                for (i = 0, ring_prod = 0;
                     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

                        if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
                                BNX2X_ERR("was only able to allocate "
                                          "%d rx sges\n", i);
                                BNX2X_ERR("disabling TPA for queue[%d]\n", j);
                                /* Cleanup already allocated elements */
                                bnx2x_free_rx_sge_range(bp, fp, ring_prod);
                                bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
                                fp->disable_tpa = 1;
                                ring_prod = 0;
                                break;
                        }
                        ring_prod = NEXT_SGE_IDX(ring_prod);
                }
                fp->rx_sge_prod = ring_prod;

                /* Allocate BDs and initialize BD ring */
                fp->rx_comp_cons = 0;
                cqe_ring_prod = ring_prod = 0;
                for (i = 0; i < bp->rx_ring_size; i++) {
                        if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
                                BNX2X_ERR("was only able to allocate "
                                          "%d rx skbs\n", i);
                                bp->eth_stats.rx_skb_alloc_failed++;
                                break;
                        }
                        ring_prod = NEXT_RX_IDX(ring_prod);
                        cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
                        WARN_ON(ring_prod <= i);
                }

                fp->rx_bd_prod = ring_prod;
                /* must not have more available CQEs than BDs */
                fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
                                       cqe_ring_prod);
                fp->rx_pkt = fp->rx_calls = 0;

                /* Warning!
                 * this will generate an interrupt (to the TSTORM)
                 * must only be done after chip is initialized
                 */
                bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
                                     fp->rx_sge_prod);
                if (j != 0)
                        continue;

                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
                       U64_LO(fp->rx_comp_mapping));
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
                       U64_HI(fp->rx_comp_mapping));
        }
}
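
/*
 * Ring geometry sketch: every ring spans several BCM_PAGE_SIZE pages and
 * the tail of each page is sacrificed for a 64-bit "next page" pointer.
 * An eth_rx_bd (and an eth_rx_sge) is half the width of that pointer, so
 * the last two slots per page are used (hence the "* i - 2" indexing),
 * while a CQE is wide enough that one eth_rx_cqe_next_page slot suffices
 * (hence "* i - 1" on the RCQ).
 */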

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
        int i, j;

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                for (i = 1; i <= NUM_TX_RINGS; i++) {
                        struct eth_tx_bd *tx_bd =
                                &fp->tx_desc_ring[TX_DESC_CNT * i - 1];

                        tx_bd->addr_hi =
                                cpu_to_le32(U64_HI(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                        tx_bd->addr_lo =
                                cpu_to_le32(U64_LO(fp->tx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
                }

                fp->tx_pkt_prod = 0;
                fp->tx_pkt_cons = 0;
                fp->tx_bd_prod = 0;
                fp->tx_bd_cons = 0;
                fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
                fp->tx_pkt = 0;
        }
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);

        spin_lock_init(&bp->spq_lock);

        bp->spq_left = MAX_SPQ_PENDING;
        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
        bp->spq_prod_bd = bp->spq;
        bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
               U64_LO(bp->spq_mapping));
        REG_WR(bp,
               XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
               U64_HI(bp->spq_mapping));

        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
               bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i) {
                struct eth_context *context = bnx2x_sp(bp, context[i].eth);
                struct bnx2x_fastpath *fp = &bp->fp[i];
                u8 sb_id = FP_SB_ID(fp);

                context->xstorm_st_context.tx_bd_page_base_hi =
                                                U64_HI(fp->tx_desc_mapping);
                context->xstorm_st_context.tx_bd_page_base_lo =
                                                U64_LO(fp->tx_desc_mapping);
                context->xstorm_st_context.db_data_addr_hi =
                                                U64_HI(fp->tx_prods_mapping);
                context->xstorm_st_context.db_data_addr_lo =
                                                U64_LO(fp->tx_prods_mapping);
                context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
                                XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

                context->ustorm_st_context.common.sb_index_numbers =
                                                BNX2X_RX_SB_INDEX_NUM;
                context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
                context->ustorm_st_context.common.status_block_id = sb_id;
                context->ustorm_st_context.common.flags =
                        USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
                context->ustorm_st_context.common.mc_alignment_size = 64;
                context->ustorm_st_context.common.bd_buff_size =
                                                bp->rx_buf_use_size;
                context->ustorm_st_context.common.bd_page_base_hi =
                                                U64_HI(fp->rx_desc_mapping);
                context->ustorm_st_context.common.bd_page_base_lo =
                                                U64_LO(fp->rx_desc_mapping);
                if (!fp->disable_tpa) {
                        context->ustorm_st_context.common.flags |=
                                (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
                        context->ustorm_st_context.common.sge_buff_size =
                                        (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
                        context->ustorm_st_context.common.sge_page_base_hi =
                                                U64_HI(fp->rx_sge_mapping);
                        context->ustorm_st_context.common.sge_page_base_lo =
                                                U64_LO(fp->rx_sge_mapping);
                }

                context->cstorm_st_context.sb_index_number =
                                                C_SB_ETH_TX_CQ_INDEX;
                context->cstorm_st_context.status_block_id = sb_id;

                context->xstorm_ag_context.cdu_reserved =
                                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                                       CDU_REGION_NUMBER_XCM_AG,
                                                       ETH_CONNECTION_TYPE);
                context->ustorm_ag_context.cdu_usage =
                                CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                                       CDU_REGION_NUMBER_UCM_AG,
                                                       ETH_CONNECTION_TYPE);
        }
}
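
/*
 * The cdu_reserved/cdu_usage values act as CDU validation bytes: the
 * CDU_RSRVD_VALUE_TYPE_A() macro folds the HW CID, the CDU region number
 * and the connection type into a value the chip can check whenever it
 * loads the context, which helps catch stale or misdirected context
 * accesses.
 */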

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int i;

        if (!is_multi(bp))
                return;

        DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
        for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                        TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
                        i % bp->num_queues);

        REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
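
/*
 * RSS indirection is filled as a plain round-robin: table entry i maps
 * hash bucket i to queue (i % num_queues), e.g. with num_queues == 4 the
 * table repeats the pattern 0, 1, 2, 3 for all
 * TSTORM_INDIRECTION_TABLE_SIZE entries.
 */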

static void bnx2x_set_client_config(struct bnx2x *bp)
{
        struct tstorm_eth_client_config tstorm_client = {0};
        int port = BP_PORT(bp);
        int i;

        tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
        tstorm_client.statistics_counter_id = BP_CL_ID(bp);
        tstorm_client.config_flags =
                                TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
        if (bp->rx_mode && bp->vlgrp) {
                tstorm_client.config_flags |=
                                TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
                DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
        }
#endif

        if (bp->flags & TPA_ENABLE_FLAG) {
                tstorm_client.max_sges_for_packet =
                        BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
                tstorm_client.max_sges_for_packet =
                        ((tstorm_client.max_sges_for_packet +
                          PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
                        PAGES_PER_SGE_SHIFT;

                tstorm_client.config_flags |=
                                TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
        }

        for_each_queue(bp, i) {
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
                       ((u32 *)&tstorm_client)[0]);
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
                       ((u32 *)&tstorm_client)[1]);
        }

        DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
           ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
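
/*
 * Worked example for max_sges_for_packet (illustrative numbers): with an
 * mtu around 9000 and 4K pages, BCM_PAGE_ALIGN(mtu) >> BCM_PAGE_SHIFT is
 * 3 pages; assuming PAGES_PER_SGE == 2, rounding up to whole SGEs gives
 * ((3 + 1) & ~1) >> 1 = 2 SGEs per packet.
 */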

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
        struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
        int mode = bp->rx_mode;
        int mask = (1 << BP_L_ID(bp));
        int func = BP_FUNC(bp);
        int i;

        DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

        switch (mode) {
        case BNX2X_RX_MODE_NONE: /* no Rx */
                tstorm_mac_filter.ucast_drop_all = mask;
                tstorm_mac_filter.mcast_drop_all = mask;
                tstorm_mac_filter.bcast_drop_all = mask;
                break;
        case BNX2X_RX_MODE_NORMAL:
                tstorm_mac_filter.bcast_accept_all = mask;
                break;
        case BNX2X_RX_MODE_ALLMULTI:
                tstorm_mac_filter.mcast_accept_all = mask;
                tstorm_mac_filter.bcast_accept_all = mask;
                break;
        case BNX2X_RX_MODE_PROMISC:
                tstorm_mac_filter.ucast_accept_all = mask;
                tstorm_mac_filter.mcast_accept_all = mask;
                tstorm_mac_filter.bcast_accept_all = mask;
                break;
        default:
                BNX2X_ERR("BAD rx mode (%d)\n", mode);
                break;
        }

        for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
                       ((u32 *)&tstorm_mac_filter)[i]);

/*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
                   ((u32 *)&tstorm_mac_filter)[i]); */
        }

        if (mode != BNX2X_RX_MODE_NONE)
                bnx2x_set_client_config(bp);
}
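
/*
 * mask carries only this function's BP_L_ID bit because the firmware
 * treats each accept/drop field as a per-client bit vector; writing the
 * struct at TSTORM_MAC_FILTER_CONFIG_OFFSET(func) keeps the setting
 * scoped to this PCI function.
 */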

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
        int i;

        /* Zero this manually as its initialization is
           currently missing in the initTool */
        for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);

        REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
        struct tstorm_eth_function_common_config tstorm_config = {0};
        struct stats_indication_flags stats_flags = {0};
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int i;
        u16 max_agg_size;

        if (is_multi(bp)) {
                tstorm_config.config_flags = MULTI_FLAGS;
                tstorm_config.rss_result_mask = MULTI_MASK;
        }

        tstorm_config.leading_client_id = BP_L_ID(bp);

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
               (*(u32 *)&tstorm_config));

        bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
        bnx2x_set_storm_rx_mode(bp);

        /* reset xstorm per client statistics */
        for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
                REG_WR(bp, BAR_XSTRORM_INTMEM +
                       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
                       i*4, 0);
        }
        /* reset tstorm per client statistics */
        for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
                       i*4, 0);
        }

        /* Init statistics related context */
        stats_flags.collect_eth = 1;

        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        if (CHIP_IS_E1H(bp)) {
                REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));
                REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));
                REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));
                REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));

                REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
                         bp->e1hov);
        }

        /* Init CQ ring mapping and aggregation size */
        max_agg_size = min((u32)(bp->rx_buf_use_size +
                                 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
                           (u32)0xffff);
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
                       U64_LO(fp->rx_comp_mapping));
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
                       U64_HI(fp->rx_comp_mapping));

                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
                         max_agg_size);
        }
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON:
                bnx2x_init_internal_common(bp);
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_PORT:
                bnx2x_init_internal_port(bp);
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                bnx2x_init_internal_func(bp);
                break;

        default:
                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
                break;
        }
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
        int i;

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                fp->bp = bp;
                fp->state = BNX2X_FP_STATE_CLOSED;
                fp->index = i;
                fp->cl_id = BP_L_ID(bp) + i;
                fp->sb_id = fp->cl_id;
                DP(NETIF_MSG_IFUP,
                   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
                   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
                bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
                              FP_SB_ID(fp));
                bnx2x_update_fpsb_idx(fp);
        }

        bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
                          DEF_SB_ID);
        bnx2x_update_dsb_idx(bp);
        bnx2x_update_coalesce(bp);
        bnx2x_init_rx_rings(bp);
        bnx2x_init_tx_ring(bp);
        bnx2x_init_sp_ring(bp);
        bnx2x_init_context(bp);
        bnx2x_init_internal(bp, load_code);
        bnx2x_init_ind_table(bp);
        bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
        bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
                                              &bp->gunzip_mapping);
        if (bp->gunzip_buf == NULL)
                goto gunzip_nomem1;

        bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
        if (bp->strm == NULL)
                goto gunzip_nomem2;

        bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
                                      GFP_KERNEL);
        if (bp->strm->workspace == NULL)
                goto gunzip_nomem3;

        return 0;

gunzip_nomem3:
        kfree(bp->strm);
        bp->strm = NULL;

gunzip_nomem2:
        pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
                            bp->gunzip_mapping);
        bp->gunzip_buf = NULL;

gunzip_nomem1:
        printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
               " decompression\n", bp->dev->name);
        return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
        kfree(bp->strm->workspace);

        kfree(bp->strm);
        bp->strm = NULL;

        if (bp->gunzip_buf) {
                pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
                                    bp->gunzip_mapping);
                bp->gunzip_buf = NULL;
        }
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
        int n, rc;

        /* check gzip header */
        if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
                return -EINVAL;

        n = 10;

#define FNAME                           0x8

        if (zbuf[3] & FNAME)
                while ((zbuf[n++] != 0) && (n < len));

        bp->strm->next_in = zbuf + n;
        bp->strm->avail_in = len - n;
        bp->strm->next_out = bp->gunzip_buf;
        bp->strm->avail_out = FW_BUF_SIZE;

        rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
        if (rc != Z_OK)
                return rc;

        rc = zlib_inflate(bp->strm, Z_FINISH);
        if ((rc != Z_OK) && (rc != Z_STREAM_END))
                printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
                       bp->dev->name, bp->strm->msg);

        bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
        if (bp->gunzip_outlen & 0x3)
                printk(KERN_ERR PFX "%s: Firmware decompression error:"
                                    " gunzip_outlen (%d) not aligned\n",
                       bp->dev->name, bp->gunzip_outlen);
        bp->gunzip_outlen >>= 2;

        zlib_inflateEnd(bp->strm);

        if (rc == Z_STREAM_END)
                return 0;

        return rc;
}
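
/*
 * Decompression note: the firmware blobs are stored gzip'ed, but
 * zlib_inflateInit2() is called with -MAX_WBITS (raw deflate, no zlib
 * header), so bnx2x_gunzip() must itself skip the 10 byte gzip header
 * and the optional NUL-terminated FNAME field before inflating.
 */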

/* nic load/unload */

/****************************************************************************
* General service functions
****************************************************************************/

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
        u32 wb_write[3];

        /* Ethernet source and destination addresses */
        wb_write[0] = 0x55555555;
        wb_write[1] = 0x55555555;
        wb_write[2] = 0x20;             /* SOP */
        REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

        /* NON-IP protocol */
        wb_write[0] = 0x09000000;
        wb_write[1] = 0x55555555;
        wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
        REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver, so to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
        int factor;
        int count, i;
        u32 val = 0;

        if (CHIP_REV_IS_FPGA(bp))
                factor = 120;
        else if (CHIP_REV_IS_EMUL(bp))
                factor = 200;
        else
                factor = 1;

        DP(NETIF_MSG_HW, "start part1\n");

        /* Disable inputs of parser neighbor blocks */
        REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
        REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
        REG_WR(bp, CFC_REG_DEBUG0, 0x1);
        NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

        /* Write 0 to parser credits for CFC search request */
        REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

        /* send Ethernet packet */
        bnx2x_lb_pckt(bp);

        /* TODO: do we need to reset the NIG statistics here? */
        /* Wait until NIG register shows 1 packet of size 0x10 */
        count = 1000 * factor;
        while (count) {

                bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
                val = *bnx2x_sp(bp, wb_data[0]);
                if (val == 0x10)
                        break;

                msleep(10);
                count--;
        }
        if (val != 0x10) {
                BNX2X_ERR("NIG timeout val = 0x%x\n", val);
                return -1;
        }

        /* Wait until PRS register shows 1 packet */
        count = 1000 * factor;
        while (count) {
                val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
                if (val == 1)
                        break;

                msleep(10);
                count--;
        }
        if (val != 0x1) {
                BNX2X_ERR("PRS timeout val = 0x%x\n", val);
                return -2;
        }

        /* Reset and init BRB, PRS */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
        msleep(50);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
        msleep(50);
        bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
        bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

        DP(NETIF_MSG_HW, "part2\n");

        /* Disable inputs of parser neighbor blocks */
        REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
        REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
        REG_WR(bp, CFC_REG_DEBUG0, 0x1);
        NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

        /* Write 0 to parser credits for CFC search request */
        REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

        /* send 10 Ethernet packets */
        for (i = 0; i < 10; i++)
                bnx2x_lb_pckt(bp);

        /* Wait until NIG register shows 10 + 1
           packets of size 11*0x10 = 0xb0 */
        count = 1000 * factor;
        while (count) {

                bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
                val = *bnx2x_sp(bp, wb_data[0]);
                if (val == 0xb0)
                        break;

                msleep(10);
                count--;
        }
        if (val != 0xb0) {
                BNX2X_ERR("NIG timeout val = 0x%x\n", val);
                return -3;
        }

        /* Wait until PRS register shows 2 packets */
        val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
        if (val != 2)
                BNX2X_ERR("PRS timeout val = 0x%x\n", val);

        /* Write 1 to parser credits for CFC search request */
        REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

        /* Wait until PRS register shows 3 packets */
        msleep(10 * factor);
        /* Wait until NIG register shows 1 packet of size 0x10 */
        val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
        if (val != 3)
                BNX2X_ERR("PRS timeout val = 0x%x\n", val);

        /* clear NIG EOP FIFO */
        for (i = 0; i < 11; i++)
                REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
        val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
        if (val != 1) {
                BNX2X_ERR("clear of NIG failed\n");
                return -4;
        }

        /* Reset and init BRB, PRS, NIG */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
        msleep(50);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
        msleep(50);
        bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
        bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

        /* set NIC mode */
        REG_WR(bp, PRS_REG_NIC_MODE, 1);

        /* Enable inputs of parser neighbor blocks */
        REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
        REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
        REG_WR(bp, CFC_REG_DEBUG0, 0x0);
        NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

        DP(NETIF_MSG_HW, "done\n");

        return 0; /* OK */
}
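
/*
 * The expected NIG byte counts follow from bnx2x_lb_pckt(): each debug
 * packet is 0x10 bytes long, so one packet reads back as 0x10 and the
 * 10 + 1 packets of part2 read back as 11 * 0x10 = 0xb0.
 */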

static void enable_blocks_attention(struct bnx2x *bp)
{
        REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
        REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
        REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
        REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
        REG_WR(bp, QM_REG_QM_INT_MASK, 0);
        REG_WR(bp, TM_REG_TM_INT_MASK, 0);
        REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
        REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
        REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
        REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
        REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
        REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
        REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
        REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
        REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
        REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
        if (CHIP_REV_IS_FPGA(bp))
                REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
        else
                REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
        REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
        REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
        REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
        REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
        REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
        REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
}

static int bnx2x_init_common(struct bnx2x *bp)
{
        u32 val, i;

        DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

        bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
        if (CHIP_IS_E1H(bp))
                REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

        REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
        msleep(30);
        REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

        bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
        if (CHIP_IS_E1(bp)) {
                /* enable HW interrupt from PXP on USDM overflow
                   bit 16 on INT_MASK_0 */
                REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
        }

        bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);

#ifdef __BIG_ENDIAN
        REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
        REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
        REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
        REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
        REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
        REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
        REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
        REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
        REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
        REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

#ifndef BCM_ISCSI
        /* set NIC mode */
        REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

        REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
        REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
        REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
        REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

        if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
                REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

        /* let the HW do its magic ... */
        msleep(100);
        /* finish PXP init */
        val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
        if (val != 1) {
                BNX2X_ERR("PXP2 CFG failed\n");
                return -EBUSY;
        }
        val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
        if (val != 1) {
                BNX2X_ERR("PXP2 RD_INIT failed\n");
                return -EBUSY;
        }

        REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
        REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

        bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

        /* clean the DMAE memory */
        bp->dmae_ready = 1;
        bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

        bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
        bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
        bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
        bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

        bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
        bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
        bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
        bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

        bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
        /* soft reset pulse */
        REG_WR(bp, QM_REG_SOFT_RESET, 1);
        REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
        bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

        bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
        REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
        if (!CHIP_REV_IS_SLOW(bp)) {
                /* enable hw interrupt from doorbell Q */
                REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
        }

        bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
        if (CHIP_REV_IS_SLOW(bp)) {
                /* fix for emulation and FPGA for no pause */
                REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
                REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
                REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
                REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
        }

        bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
        if (CHIP_IS_E1H(bp))
                REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

        bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
        bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
        bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
        bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

        if (CHIP_IS_E1H(bp)) {
                bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1H/2);
                bnx2x_init_fill(bp,
                                USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
                                0, STORM_INTMEM_SIZE_E1H/2);
        } else { /* E1 */
                bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
                bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
                bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
                bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
                                STORM_INTMEM_SIZE_E1);
        }

        bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
        bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
        bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
        bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

        /* sync semi rtc */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               0x80000000);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
               0x80000000);

        bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
        bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
        bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

        REG_WR(bp, SRC_REG_SOFT_RST, 1);
        for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
                REG_WR(bp, i, 0xc0cac01a);
                /* TODO: replace with something meaningful */
        }
        if (CHIP_IS_E1H(bp))
                bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
        REG_WR(bp, SRC_REG_SOFT_RST, 0);

        if (sizeof(union cdu_context) != 1024)
                /* we currently assume that a context is 1024 bytes */
                printk(KERN_ALERT PFX "please adjust the size of"
                       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

        bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
        val = (4 << 24) + (0 << 12) + 1024;
        REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
        if (CHIP_IS_E1(bp)) {
                /* !!! fix pxp client credit until excel update */
                REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
                REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
        }

        bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
        REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

        bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
        bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

        /* PXPCS COMMON comes here */
        /* Reset PCIE errors for debug */
        REG_WR(bp, 0x2814, 0xffffffff);
        REG_WR(bp, 0x3820, 0xffffffff);

        /* EMAC0 COMMON comes here */
        /* EMAC1 COMMON comes here */
        /* DBU COMMON comes here */
        /* DBG COMMON comes here */

        bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
        if (CHIP_IS_E1H(bp)) {
                REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
                REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
        }

        if (CHIP_REV_IS_SLOW(bp))
                msleep(200);

        /* finish CFC init */
        val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC LL_INIT failed\n");
                return -EBUSY;
        }
        val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC AC_INIT failed\n");
                return -EBUSY;
        }
        val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC CAM_INIT failed\n");
                return -EBUSY;
        }
        REG_WR(bp, CFC_REG_DEBUG0, 0);

        /* read NIG statistic
           to see if this is our first up since powerup */
        bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *bnx2x_sp(bp, wb_data[0]);

        /* do internal memory self test */
        if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
                BNX2X_ERR("internal mem self test failed\n");
                return -EBUSY;
        }

        switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
        case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
                /* Fan failure is indicated by SPIO 5 */
                bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
                               MISC_REGISTERS_SPIO_INPUT_HI_Z);

                /* set to active low mode */
                val = REG_RD(bp, MISC_REG_SPIO_INT);
                val |= ((1 << MISC_REGISTERS_SPIO_5) <<
                                MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
                REG_WR(bp, MISC_REG_SPIO_INT, val);

                /* enable interrupt to signal the IGU */
                val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
                val |= (1 << MISC_REGISTERS_SPIO_5);
                REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
                break;

        default:
                break;
        }

        /* clear PXP2 attentions */
        REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

        enable_blocks_attention(bp);

        if (bp->flags & TPA_ENABLE_FLAG) {
                struct tstorm_eth_tpa_exist tmp = {0};

                tmp.tpa_exist = 1;

                REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
                       ((u32 *)&tmp)[0]);
                REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
                       ((u32 *)&tmp)[1]);
        }

        return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 val;

        DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

        /* Port PXP comes here */
        /* Port PXP2 comes here */
#ifdef BCM_ISCSI
        i++;
        wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
        wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
        REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
        REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

        i++;
        wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
        wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
        REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
        REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

        i++;
        wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
        wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
        REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
        REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
        /* Port CMs come here */

        /* Port QM comes here */
#ifdef BCM_ISCSI
        REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
        REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

        bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
                             func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
        /* Port DQ comes here */
        /* Port BRB1 comes here */
        /* Port PRS comes here */
        /* Port TSDM comes here */
        /* Port CSDM comes here */
        /* Port USDM comes here */
        /* Port XSDM comes here */
        bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
                             port ? TSEM_PORT1_END : TSEM_PORT0_END);
        bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
                             port ? USEM_PORT1_END : USEM_PORT0_END);
        bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
                             port ? CSEM_PORT1_END : CSEM_PORT0_END);
        bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
                             port ? XSEM_PORT1_END : XSEM_PORT0_END);
        /* Port UPB comes here */
        /* Port XPB comes here */

        bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
                             port ? PBF_PORT1_END : PBF_PORT0_END);

        /* configure PBF to work without PAUSE mtu 9000 */
        REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

        /* update threshold */
        REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

        /* probe changes */
        REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
        msleep(5);
        REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
        /* tell the searcher where the T2 table is */
        REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

        wb_write[0] = U64_LO(bp->t2_mapping);
        wb_write[1] = U64_HI(bp->t2_mapping);
        REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
        wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
        wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
        REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

        REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
        /* Port SRCH comes here */
#endif
        /* Port CDU comes here */
        /* Port CFC comes here */

        if (CHIP_IS_E1(bp)) {
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
                             port ? HC_PORT1_END : HC_PORT0_END);

        bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
                                    MISC_AEU_PORT0_START,
                             port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
        /* init aeu_mask_attn_func_0/1:
         *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
         *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
         *             bits 4-7 are used for "per vn group attention" */
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
               (IS_E1HMF(bp) ? 0xF7 : 0x7));

        /* Port PXPCS comes here */
        /* Port EMAC0 comes here */
        /* Port EMAC1 comes here */
        /* Port DBU comes here */
        /* Port DBG comes here */
        bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
                             port ? NIG_PORT1_END : NIG_PORT0_END);

        REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

        if (CHIP_IS_E1H(bp)) {
                u32 wsum;
                struct cmng_struct_per_port m_cmng_port;
                int vn;

                /* 0x2 disable e1hov, 0x1 enable */
                REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
                       (IS_E1HMF(bp) ? 0x1 : 0x2));

                /* Init RATE SHAPING and FAIRNESS contexts.
                   Initialize as if there is 10G link. */
                wsum = bnx2x_calc_vn_wsum(bp);
                bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
                if (IS_E1HMF(bp))
                        for (vn = VN_0; vn < E1HVN_MAX; vn++)
                                bnx2x_init_vn_minmax(bp, 2*vn + port,
                                        wsum, 10000, &m_cmng_port);
        }

        /* Port MCP comes here */
        /* Port DMAE comes here */

        switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
        case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
                /* add SPIO 5 to group 0 */
                val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
                val |= AEU_INPUTS_ATTN_BITS_SPIO5;
                REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
                break;

        default:
                break;
        }

        bnx2x__link_reset(bp);

        return 0;
}

#define ILT_PER_FUNC            (768/2)
#define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
/* the physical address is shifted right 12 bits, a 1=valid bit is
   added at the 53rd bit, and since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)          (((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)

#define CNIC_ILT_LINES          0
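
/*
 * Illustrative split (made-up address): for addr = 0x12345678000,
 *      ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x12345678
 *      ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44)  = 0x00100000
 * so the low write carries address bits 43:12 and the high write
 * carries bits 63:44 plus the valid bit at position 20.
 */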

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
        int reg;

        if (CHIP_IS_E1H(bp))
                reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
        else /* E1 */
                reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

        bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int i;

        DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

        i = FUNC_ILT_BASE(func);

        bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
        if (CHIP_IS_E1H(bp)) {
                REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
                REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
        } else /* E1 */
                REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
                       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

        if (CHIP_IS_E1H(bp)) {
                for (i = 0; i < 9; i++)
                        bnx2x_init_block(bp,
                                         cm_start[func][i], cm_end[func][i]);

                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
                REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
        }

        /* HC init per function */
        if (CHIP_IS_E1H(bp)) {
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

        if (CHIP_IS_E1H(bp))
                REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

        /* Reset PCIE errors for debug */
        REG_WR(bp, 0x2114, 0xffffffff);
        REG_WR(bp, 0x2120, 0xffffffff);

        return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
        int i, rc = 0;

        DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
           BP_FUNC(bp), load_code);

        bp->dmae_ready = 0;
        mutex_init(&bp->dmae_mutex);
        bnx2x_gunzip_init(bp);

        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON:
                rc = bnx2x_init_common(bp);
                if (rc)
                        goto init_hw_err;
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_PORT:
                bp->dmae_ready = 1;
                rc = bnx2x_init_port(bp);
                if (rc)
                        goto init_hw_err;
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                bp->dmae_ready = 1;
                rc = bnx2x_init_func(bp);
                if (rc)
                        goto init_hw_err;
                break;

        default:
                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
                break;
        }

        if (!BP_NOMCP(bp)) {
                int func = BP_FUNC(bp);

                bp->fw_drv_pulse_wr_seq =
                                (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
                                 DRV_PULSE_SEQ_MASK);
                bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
                DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
                   bp->fw_drv_pulse_wr_seq, bp->func_stx);
        }

        /* this needs to be done before gunzip end */
        bnx2x_zero_def_sb(bp);
        for_each_queue(bp, i)
                bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
        bnx2x_gunzip_end(bp);

        return rc;
}

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
        int func = BP_FUNC(bp);
        u32 seq = ++bp->fw_seq;
        u32 rc = 0;
        u32 cnt = 1;
        u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

        SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
        DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

        do {
                /* let the FW do its magic ... */
                msleep(delay);

                rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

                /* Give the FW up to 2 second (200*10ms) */
        } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

        DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
           cnt*delay, rc, seq);

        /* is this a reply to our command? */
        if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
                rc &= FW_MSG_CODE_MASK;

        } else {
                /* FW BUG! */
                BNX2X_ERR("FW failed to respond!\n");
                bnx2x_fw_dump(bp);
                rc = 0;
        }

        return rc;
}
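
/*
 * Mailbox handshake note: the sequence number in the low bits of
 * drv_mb_header is echoed back by the MCP in fw_mb_header, and matching
 * it under FW_MSG_SEQ_NUMBER_MASK is what pairs a reply with our
 * request.  The loop above polls for up to ~2 seconds (200 iterations
 * of 10ms, or 100ms steps on slow emulation/FPGA platforms).
 */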

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        pci_free_consistent(bp->pdev, size, x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        vfree(x); \
                        x = NULL; \
                } \
        } while (0)

        int i;

        /* fastpath */
        for_each_queue(bp, i) {

                /* Status blocks */
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
                               bnx2x_fp(bp, i, status_blk_mapping),
                               sizeof(struct host_status_block) +
                               sizeof(struct eth_tx_db_data));

                /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
                BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
                               bnx2x_fp(bp, i, tx_desc_mapping),
                               sizeof(struct eth_tx_bd) * NUM_TX_BD);

                BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
                               bnx2x_fp(bp, i, rx_desc_mapping),
                               sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
                               bnx2x_fp(bp, i, rx_comp_mapping),
                               sizeof(struct eth_fast_path_rx_cqe) *
                               NUM_RCQ_BD);

                /* SGE ring */
                BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
                               bnx2x_fp(bp, i, rx_sge_mapping),
                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }
        /* end of fastpath */

        BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
                       sizeof(struct host_def_status_block));

        BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
                       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
        BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
        BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
        BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
        BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
        BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = pci_alloc_consistent(bp->pdev, size, y); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset(x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = vmalloc(size); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset(x, 0, size); \
        } while (0)

        int i;

        /* fastpath */
        for_each_queue(bp, i) {
                bnx2x_fp(bp, i, bp) = bp;

                /* Status blocks */
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
                                &bnx2x_fp(bp, i, status_blk_mapping),
                                sizeof(struct host_status_block) +
                                sizeof(struct eth_tx_db_data));

                bnx2x_fp(bp, i, hw_tx_prods) =
                                (void *)(bnx2x_fp(bp, i, status_blk) + 1);

                bnx2x_fp(bp, i, tx_prods_mapping) =
                                bnx2x_fp(bp, i, status_blk_mapping) +
                                sizeof(struct host_status_block);

                /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
                BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
                            sizeof(struct sw_tx_bd) * NUM_TX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
                                &bnx2x_fp(bp, i, tx_desc_mapping),
                                sizeof(struct eth_tx_bd) * NUM_TX_BD);

                BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
                            sizeof(struct sw_rx_bd) * NUM_RX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
                                &bnx2x_fp(bp, i, rx_desc_mapping),
                                sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
                                &bnx2x_fp(bp, i, rx_comp_mapping),
                                sizeof(struct eth_fast_path_rx_cqe) *
                                NUM_RCQ_BD);

                /* SGE ring */
                BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
                            sizeof(struct sw_rx_page) * NUM_RX_SGE);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
                                &bnx2x_fp(bp, i, rx_sge_mapping),
                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }
        /* end of fastpath */

        BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
                        sizeof(struct host_def_status_block));

        BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
                        sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
        BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

        /* Initialize T1 */
        for (i = 0; i < 64*1024; i += 64) {
                *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
                *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
        }

        /* allocate searcher T2 table
           we allocate 1/4 of alloc num for T2
           (which is not entered into the ILT) */
        BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

        /* Initialize T2 */
        for (i = 0; i < 16*1024; i += 64)
                *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

        /* now fixup the last line in the block to point to the next block */
        *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

        /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
        BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

        /* QM queues (128*MAX_CONN) */
        BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

        /* Slow path ring */
        BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

        return 0;

alloc_mem_err:
        bnx2x_free_mem(bp);
        return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                u16 bd_cons = fp->tx_bd_cons;
                u16 sw_prod = fp->tx_pkt_prod;
                u16 sw_cons = fp->tx_pkt_cons;

                while (sw_cons != sw_prod) {
                        bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
                        sw_cons++;
                }
        }
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
        int i, j;

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                for (i = 0; i < NUM_RX_BD; i++) {
                        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
                        struct sk_buff *skb = rx_buf->skb;

                        if (skb == NULL)
                                continue;

                        pci_unmap_single(bp->pdev,
                                         pci_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);

                        rx_buf->skb = NULL;
                        dev_kfree_skb(skb);
                }
                if (!fp->disable_tpa)
                        bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
                                            ETH_MAX_AGGREGATION_QUEUES_E1 :
                                            ETH_MAX_AGGREGATION_QUEUES_E1H);
        }
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
        bnx2x_free_tx_skbs(bp);
        bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
        int i, offset = 1;

        free_irq(bp->msix_table[0].vector, bp->dev);
        DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
           bp->msix_table[0].vector);

        for_each_queue(bp, i) {
                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
                   "state %x\n", i, bp->msix_table[i + offset].vector,
                   bnx2x_fp(bp, i, state));

                if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
                        BNX2X_ERR("IRQ of fp #%d being freed while "
                                  "state != closed\n", i);

                free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
        }
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                bnx2x_free_msix_irqs(bp);
                pci_disable_msix(bp->pdev);
                bp->flags &= ~USING_MSIX_FLAG;

        } else
                free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
        int i, rc, offset;

        bp->msix_table[0].entry = 0;
        offset = 1;
        DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

        for_each_queue(bp, i) {
                int igu_vec = offset + i + BP_L_ID(bp);

                bp->msix_table[i + offset].entry = igu_vec;
                DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
                   "(fastpath #%u)\n", i + offset, igu_vec, i);
        }

        rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
                             bp->num_queues + offset);
        if (rc) {
                DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
                return -1;
        }
        bp->flags |= USING_MSIX_FLAG;

        return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
        int i, rc, offset = 1;

        rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
                         bp->dev->name, bp->dev);
        if (rc) {
                BNX2X_ERR("request sp irq failed\n");
                return -EBUSY;
        }

        for_each_queue(bp, i) {
                rc = request_irq(bp->msix_table[i + offset].vector,
                                 bnx2x_msix_fp_int, 0,
                                 bp->dev->name, &bp->fp[i]);
                if (rc) {
                        BNX2X_ERR("request fp #%d irq failed rc %d\n",
                                  i + offset, rc);
                        bnx2x_free_msix_irqs(bp);
                        return -EBUSY;
                }

                bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
        }

        return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
        int rc;

        rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
                         bp->dev->name, bp->dev);
        if (!rc)
                bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

        return rc;
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
        struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
        int port = BP_PORT(bp);

        /* CAM allocation
         * unicasts 0-31:port0  32-63:port1
         * multicast 64-127:port0  128-191:port1
         */
        config->hdr.length_6b = 2;
        config->hdr.offset = port ? 31 : 0;
        config->hdr.client_id = BP_CL_ID(bp);
        config->hdr.reserved1 = 0;

        /* primary MAC */
        config->config_table[0].cam_entry.msb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[0]);
        config->config_table[0].cam_entry.middle_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[2]);
        config->config_table[0].cam_entry.lsb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[4]);
        config->config_table[0].cam_entry.flags = cpu_to_le16(port);
        if (set)
                config->config_table[0].target_table_entry.flags = 0;
        else
                CAM_INVALIDATE(config->config_table[0]);
        config->config_table[0].target_table_entry.client_id = 0;
        config->config_table[0].target_table_entry.vlan_id = 0;

        DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
           (set ? "setting" : "clearing"),
           config->config_table[0].cam_entry.msb_mac_addr,
           config->config_table[0].cam_entry.middle_mac_addr,
           config->config_table[0].cam_entry.lsb_mac_addr);

        /* broadcast */
        config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
        config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
        config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
        config->config_table[1].cam_entry.flags = cpu_to_le16(port);
        if (set)
                config->config_table[1].target_table_entry.flags =
                                TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
        else
                CAM_INVALIDATE(config->config_table[1]);
        config->config_table[1].target_table_entry.client_id = 0;
        config->config_table[1].target_table_entry.vlan_id = 0;

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
        struct mac_configuration_cmd_e1h *config =
                (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

        if (set && (bp->state != BNX2X_STATE_OPEN)) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
                return;
        }

        /* CAM allocation for E1H
         * unicasts: by func number
         * multicast: 20+FUNC*20, 20 each
         */
        config->hdr.length_6b = 1;
        config->hdr.offset = BP_FUNC(bp);
        config->hdr.client_id = BP_CL_ID(bp);
        config->hdr.reserved1 = 0;

        /* primary MAC */
        config->config_table[0].msb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[0]);
        config->config_table[0].middle_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[2]);
        config->config_table[0].lsb_mac_addr =
                                        swab16(*(u16 *)&bp->dev->dev_addr[4]);
        config->config_table[0].client_id = BP_L_ID(bp);
        config->config_table[0].vlan_id = 0;
        config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
        if (set)
                config->config_table[0].flags = BP_PORT(bp);
        else
                config->config_table[0].flags =
                                MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

        DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
           (set ? "setting" : "clearing"),
           config->config_table[0].msb_mac_addr,
           config->config_table[0].middle_mac_addr,
           config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
                             int *state_p, int poll)
{
        /* can take a while if any port is running */
        int cnt = 500;

        DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
           poll ? "polling" : "waiting", state, idx);

        might_sleep();
        while (cnt--) {
                if (poll) {
                        bnx2x_rx_int(bp->fp, 10);
                        /* if index is different from 0
                         * the reply for some commands will
                         * be on the non default queue
                         */
                        if (idx)
                                bnx2x_rx_int(&bp->fp[idx], 10);
                }

                mb(); /* state is changed by bnx2x_sp_event() */
                if (*state_p == state)
                        return 0;

                msleep(1);
        }

        /* timeout! */
        BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
                  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
        int rc;

        /* reset IGU state */
        bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

        /* SETUP ramrod */
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

        /* Wait for completion */
        rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

        return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
        /* reset IGU state */
        bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

        /* SETUP ramrod */
        bp->fp[index].state = BNX2X_FP_STATE_OPENING;
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

        /* Wait for completion */
        return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
                                 &(bp->fp[index].state), 0);
}
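
/*
 * Ramrod flow reminder: bnx2x_sp_post() only queues the command on the
 * slowpath ring.  The completion arrives as an event on a status block,
 * where bnx2x_sp_event() advances bp->state or fp->state, and
 * bnx2x_wait_ramrod() simply polls that state word (hence the mb()
 * before each check).
 */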
6218 static int bnx2x_poll(struct napi_struct *napi, int budget);
6219 static void bnx2x_set_rx_mode(struct net_device *dev);
6221 /* must be called with rtnl_lock */
6222 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6227 #ifdef BNX2X_STOP_ON_ERROR
6228 if (unlikely(bp->panic))
6232 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6234 /* Send LOAD_REQUEST command to MCP
6235 Returns the type of LOAD command:
6236 if it is the first port to be initialized
6237 common blocks should be initialized, otherwise - not
6239 if (!BP_NOMCP(bp)) {
6240 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6242 BNX2X_ERR("MCP response failure, aborting\n");
6245 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6246 return -EBUSY; /* other port in diagnostic mode */
6249 int port = BP_PORT(bp);
6251 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6252 load_count[0], load_count[1], load_count[2]);
6254 load_count[1 + port]++;
6255 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6256 load_count[0], load_count[1], load_count[2]);
6257 if (load_count[0] == 1)
6258 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6259 else if (load_count[1 + port] == 1)
6260 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6262 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6265 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6266 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6270 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6272 /* if we can't use MSI-X we only need one fp,
6273 * so try to enable MSI-X with the requested number of fp's
6274 * and fallback to inta with one fp
6280 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6281 /* user requested number */
6282 bp->num_queues = use_multi;
6285 bp->num_queues = min_t(u32, num_online_cpus(),
6290 if (bnx2x_enable_msix(bp)) {
6291 /* failed to enable MSI-X */
6294 BNX2X_ERR("Multi requested but failed"
6295 " to enable MSI-X\n");
6299 "set number of queues to %d\n", bp->num_queues);
6301 if (bnx2x_alloc_mem(bp))
6304 for_each_queue(bp, i)
6305 bnx2x_fp(bp, i, disable_tpa) =
6306 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6308 if (bp->flags & USING_MSIX_FLAG) {
6309 rc = bnx2x_req_msix_irqs(bp);
6311 pci_disable_msix(bp->pdev);
6316 rc = bnx2x_req_irq(bp);
6318 BNX2X_ERR("IRQ request failed, aborting\n");
6323 for_each_queue(bp, i)
6324 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6328 rc = bnx2x_init_hw(bp, load_code);
6330 BNX2X_ERR("HW init failed, aborting\n");
6334 /* Setup NIC internals and enable interrupts */
6335 bnx2x_nic_init(bp, load_code);
6337 /* Send LOAD_DONE command to MCP */
6338 if (!BP_NOMCP(bp)) {
6339 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6341 BNX2X_ERR("MCP response failure, aborting\n");
6343 goto load_int_disable;
6347 bnx2x_stats_init(bp);
6349 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6351 /* Enable Rx interrupt handling before sending the ramrod
6352 as it's completed on Rx FP queue */
6353 for_each_queue(bp, i)
6354 napi_enable(&bnx2x_fp(bp, i, napi));
6356 /* Enable interrupt handling */
6357 atomic_set(&bp->intr_sem, 0);
6359 rc = bnx2x_setup_leading(bp);
6361 BNX2X_ERR("Setup leading failed!\n");
6362 goto load_stop_netif;
6365 if (CHIP_IS_E1H(bp))
6366 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6367 BNX2X_ERR("!!! mf_cfg function disabled\n");
6368 bp->state = BNX2X_STATE_DISABLED;
6371 if (bp->state == BNX2X_STATE_OPEN)
6372 for_each_nondefault_queue(bp, i) {
6373 rc = bnx2x_setup_multi(bp, i);
6375 goto load_stop_netif;
6379 bnx2x_set_mac_addr_e1(bp, 1);
6381 bnx2x_set_mac_addr_e1h(bp, 1);
6384 bnx2x_initial_phy_init(bp);
6386 /* Start fast path */
6387 switch (load_mode) {
6389 /* Tx queue should only be re-enabled */
6390 netif_wake_queue(bp->dev);
6391 bnx2x_set_rx_mode(bp->dev);
6395 netif_start_queue(bp->dev);
6396 bnx2x_set_rx_mode(bp->dev);
6397 if (bp->flags & USING_MSIX_FLAG)
6398 printk(KERN_INFO PFX "%s: using MSI-X\n",
6403 bnx2x_set_rx_mode(bp->dev);
6404 bp->state = BNX2X_STATE_DIAG;
6412 bnx2x__link_status_update(bp);
6414 /* start the timer */
6415 mod_timer(&bp->timer, jiffies + bp->current_interval);
6421 for_each_queue(bp, i)
6422 napi_disable(&bnx2x_fp(bp, i, napi));
6425 bnx2x_int_disable_sync(bp);
6430 /* Free SKBs, SGEs, TPA pool and driver internals */
6431 bnx2x_free_skbs(bp);
6432 for_each_queue(bp, i)
6433 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6434 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6438 /* TBD we really need to reset the chip
6439 if we want to recover from this */
6443 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6447 /* halt the connection */
6448 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6449 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6451 /* Wait for completion */
6452 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6453 &(bp->fp[index].state), 1);
6454 if (rc) /* timeout */
6457 /* delete cfc entry */
6458 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6460 /* Wait for completion */
6461 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6462 &(bp->fp[index].state), 1);
6466 static int bnx2x_stop_leading(struct bnx2x *bp)
6468 u16 dsb_sp_prod_idx;
6469 /* if the other port is handling traffic,
6470 this can take a lot of time */
6476 /* Send HALT ramrod */
6477 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6478 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6480 /* Wait for completion */
6481 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6482 &(bp->fp[0].state), 1);
6483 if (rc) /* timeout */
6486 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6488 /* Send PORT_DELETE ramrod */
6489 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6491 /* Wait for completion to arrive on the default status block;
6492 we are going to reset the chip anyway,
6493 so there is not much to do if this times out
6495 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6497 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6498 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6499 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6500 #ifdef BNX2X_STOP_ON_ERROR
6510 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6511 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6516 static void bnx2x_reset_func(struct bnx2x *bp)
6518 int port = BP_PORT(bp);
6519 int func = BP_FUNC(bp);
6523 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6524 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6526 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6529 base = FUNC_ILT_BASE(func);
6530 for (i = base; i < base + ILT_PER_FUNC; i++)
6531 bnx2x_ilt_wr(bp, i, 0);
6534 static void bnx2x_reset_port(struct bnx2x *bp)
6536 int port = BP_PORT(bp);
6539 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6541 /* Do not rcv packets to BRB */
6542 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6543 /* Do not direct rcv packets that are not for MCP to the BRB */
6544 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6545 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6548 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6551 /* Check for BRB port occupancy */
6552 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6554 DP(NETIF_MSG_IFDOWN,
6555 "BRB1 is not empty %d blooks are occupied\n", val);
6557 /* TODO: Close Doorbell port? */
6560 static void bnx2x_reset_common(struct bnx2x *bp)
6563 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6565 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6568 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6570 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6571 BP_FUNC(bp), reset_code);
6573 switch (reset_code) {
6574 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6575 bnx2x_reset_port(bp);
6576 bnx2x_reset_func(bp);
6577 bnx2x_reset_common(bp);
6580 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6581 bnx2x_reset_port(bp);
6582 bnx2x_reset_func(bp);
6585 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6586 bnx2x_reset_func(bp);
6590 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6595 /* must be called with rtnl_lock */
6596 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6598 int port = BP_PORT(bp);
6602 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6604 bp->rx_mode = BNX2X_RX_MODE_NONE;
6605 bnx2x_set_storm_rx_mode(bp);
6607 if (netif_running(bp->dev)) {
6608 netif_tx_disable(bp->dev);
6609 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6612 del_timer_sync(&bp->timer);
6613 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6614 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6615 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6617 /* Wait until tx fast path tasks complete */
6618 for_each_queue(bp, i) {
6619 struct bnx2x_fastpath *fp = &bp->fp[i];
6623 while (BNX2X_HAS_TX_WORK(fp)) {
6625 if (!netif_running(bp->dev))
6626 bnx2x_tx_int(fp, 1000);
6629 BNX2X_ERR("timeout waiting for queue[%d]\n",
6631 #ifdef BNX2X_STOP_ON_ERROR
6644 /* Give HW time to discard old tx messages */
6647 for_each_queue(bp, i)
6648 napi_disable(&bnx2x_fp(bp, i, napi));
6649 /* Disable interrupts after Tx and Rx are disabled on stack level */
6650 bnx2x_int_disable_sync(bp);
6655 if (unload_mode == UNLOAD_NORMAL)
6656 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6658 else if (bp->flags & NO_WOL_FLAG) {
6659 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6660 if (CHIP_IS_E1H(bp))
6661 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6663 } else if (bp->wol) {
6664 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6665 u8 *mac_addr = bp->dev->dev_addr;
6667 /* The mac address is written to entries 1-4 to
6668 preserve entry 0, which is used by the PMF */
6669 u8 entry = (BP_E1HVN(bp) + 1)*8;
6671 val = (mac_addr[0] << 8) | mac_addr[1];
6672 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6674 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6675 (mac_addr[4] << 8) | mac_addr[5];
6676 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
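/* Editor's example (illustrative MAC, not from the source): for
 * 00:10:18:aa:bb:cc the two EMAC_WR() calls above program
 *	MAC_MATCH[entry]     = 0x00000010	(bytes 0-1)
 *	MAC_MATCH[entry + 4] = 0x18aabbcc	(bytes 2-5)
 * so the EMAC can match the sleeping host's address for Wake-on-LAN.
 */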
6678 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6681 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6683 if (CHIP_IS_E1(bp)) {
6684 struct mac_configuration_cmd *config =
6685 bnx2x_sp(bp, mcast_config);
6687 bnx2x_set_mac_addr_e1(bp, 0);
6689 for (i = 0; i < config->hdr.length_6b; i++)
6690 CAM_INVALIDATE(config->config_table[i]);
6692 config->hdr.length_6b = i;
6693 if (CHIP_REV_IS_SLOW(bp))
6694 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6696 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6697 config->hdr.client_id = BP_CL_ID(bp);
6698 config->hdr.reserved1 = 0;
6700 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6701 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6702 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6705 bnx2x_set_mac_addr_e1h(bp, 0);
6707 for (i = 0; i < MC_HASH_SIZE; i++)
6708 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6711 if (CHIP_IS_E1H(bp))
6712 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6714 /* Close multi and leading connections.
6715 Completions for ramrods are collected in a synchronous way */
6716 for_each_nondefault_queue(bp, i)
6717 if (bnx2x_stop_multi(bp, i))
6720 rc = bnx2x_stop_leading(bp);
6722 BNX2X_ERR("Stop leading failed!\n");
6723 #ifdef BNX2X_STOP_ON_ERROR
6732 reset_code = bnx2x_fw_command(bp, reset_code);
6734 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6735 load_count[0], load_count[1], load_count[2]);
6737 load_count[1 + port]--;
6738 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6739 load_count[0], load_count[1], load_count[2]);
6740 if (load_count[0] == 0)
6741 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6742 else if (load_count[1 + port] == 0)
6743 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6745 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6748 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6749 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6750 bnx2x__link_reset(bp);
6752 /* Reset the chip */
6753 bnx2x_reset_chip(bp, reset_code);
6755 /* Report UNLOAD_DONE to MCP */
6757 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6759 /* Free SKBs, SGEs, TPA pool and driver internals */
6760 bnx2x_free_skbs(bp);
6761 for_each_queue(bp, i)
6762 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6763 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6766 bp->state = BNX2X_STATE_CLOSED;
6768 netif_carrier_off(bp->dev);
6773 static void bnx2x_reset_task(struct work_struct *work)
6775 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6777 #ifdef BNX2X_STOP_ON_ERROR
6778 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6779 " so reset not done to allow debug dump,\n"
6780 KERN_ERR " you will need to reboot when done\n");
6786 if (!netif_running(bp->dev))
6787 goto reset_task_exit;
6789 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6790 bnx2x_nic_load(bp, LOAD_NORMAL);
6796 /* end of nic load/unload */
6801 * Init service functions
6804 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6808 /* Check if there is any driver already loaded */
6809 val = REG_RD(bp, MISC_REG_UNPREPARED);
6811 /* Check if it is the UNDI driver:
6812 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
6814 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6815 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6817 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6819 int func = BP_FUNC(bp);
6823 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6825 /* try unload UNDI on port 0 */
6828 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6829 DRV_MSG_SEQ_NUMBER_MASK);
6830 reset_code = bnx2x_fw_command(bp, reset_code);
6832 /* if UNDI is loaded on the other port */
6833 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6835 /* send "DONE" for previous unload */
6836 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6838 /* unload UNDI on port 1 */
6841 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6842 DRV_MSG_SEQ_NUMBER_MASK);
6843 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6845 bnx2x_fw_command(bp, reset_code);
6848 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6849 HC_REG_CONFIG_0), 0x1000);
6851 /* close input traffic and wait for it */
6852 /* Do not rcv packets to BRB */
6854 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6855 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6856 /* Do not direct rcv packets that are not for MCP to the BRB */
6859 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6860 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6863 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6864 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6867 /* save NIG port swap info */
6868 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6869 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6872 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6875 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6877 /* take the NIG out of reset and restore swap values */
6879 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6880 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6881 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6882 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6884 /* send unload done to the MCP */
6885 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6887 /* restore our func and fw_seq */
6890 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6891 DRV_MSG_SEQ_NUMBER_MASK);
6893 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6897 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6899 u32 val, val2, val3, val4, id;
6901 /* Get the chip revision id and number. */
6902 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6903 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6904 id = ((val & 0xffff) << 16);
6905 val = REG_RD(bp, MISC_REG_CHIP_REV);
6906 id |= ((val & 0xf) << 12);
6907 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6908 id |= ((val & 0xff) << 4);
6909 REG_RD(bp, MISC_REG_BOND_ID);
6911 bp->common.chip_id = id;
6912 bp->link_params.chip_id = bp->common.chip_id;
6913 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
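/* Editor's sketch (hypothetical macros, simply inverting the field
 * layout documented above: chip num in bits 16-31, rev in 12-15, metal
 * in 4-11, bond_id in 0-3):
 */
#define EXAMPLE_CHIP_NUM(id)	(((id) >> 16) & 0xffff)
#define EXAMPLE_CHIP_REV(id)	(((id) >> 12) & 0xf)
#define EXAMPLE_CHIP_METAL(id)	(((id) >> 4) & 0xff)
#define EXAMPLE_BOND_ID(id)	((id) & 0xf)
/* e.g. id 0x57101004 -> chip 0x5710, rev 0x1, metal 0x00, bond 0x4 */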
6915 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6916 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6917 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6918 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6919 bp->common.flash_size, bp->common.flash_size);
6921 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6922 bp->link_params.shmem_base = bp->common.shmem_base;
6923 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6925 if (!bp->common.shmem_base ||
6926 (bp->common.shmem_base < 0xA0000) ||
6927 (bp->common.shmem_base >= 0xC0000)) {
6928 BNX2X_DEV_INFO("MCP not active\n");
6929 bp->flags |= NO_MCP_FLAG;
6933 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6934 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6935 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6936 BNX2X_ERR("BAD MCP validity signature\n");
6938 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6939 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6941 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6942 bp->common.hw_config, bp->common.board);
6944 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6945 SHARED_HW_CFG_LED_MODE_MASK) >>
6946 SHARED_HW_CFG_LED_MODE_SHIFT);
6948 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6949 bp->common.bc_ver = val;
6950 BNX2X_DEV_INFO("bc_ver %X\n", val);
6951 if (val < BNX2X_BC_VER) {
6952 /* for now only warn;
6953 * later we might need to enforce this */
6954 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6955 " please upgrade BC\n", BNX2X_BC_VER, val);
6957 BNX2X_DEV_INFO("%sWoL Capable\n",
6958 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6960 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6961 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6962 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6963 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6965 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6966 val, val2, val3, val4);
6969 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6972 int port = BP_PORT(bp);
6975 switch (switch_cfg) {
6977 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6980 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6981 switch (ext_phy_type) {
6982 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6983 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6986 bp->port.supported |= (SUPPORTED_10baseT_Half |
6987 SUPPORTED_10baseT_Full |
6988 SUPPORTED_100baseT_Half |
6989 SUPPORTED_100baseT_Full |
6990 SUPPORTED_1000baseT_Full |
6991 SUPPORTED_2500baseX_Full |
6996 SUPPORTED_Asym_Pause);
6999 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7000 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7003 bp->port.supported |= (SUPPORTED_10baseT_Half |
7004 SUPPORTED_10baseT_Full |
7005 SUPPORTED_100baseT_Half |
7006 SUPPORTED_100baseT_Full |
7007 SUPPORTED_1000baseT_Full |
7012 SUPPORTED_Asym_Pause);
7016 BNX2X_ERR("NVRAM config error. "
7017 "BAD SerDes ext_phy_config 0x%x\n",
7018 bp->link_params.ext_phy_config);
7022 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7024 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7027 case SWITCH_CFG_10G:
7028 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7031 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7032 switch (ext_phy_type) {
7033 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7034 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7037 bp->port.supported |= (SUPPORTED_10baseT_Half |
7038 SUPPORTED_10baseT_Full |
7039 SUPPORTED_100baseT_Half |
7040 SUPPORTED_100baseT_Full |
7041 SUPPORTED_1000baseT_Full |
7042 SUPPORTED_2500baseX_Full |
7043 SUPPORTED_10000baseT_Full |
7048 SUPPORTED_Asym_Pause);
7051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7052 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7055 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7058 SUPPORTED_Asym_Pause);
7061 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7062 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7065 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7066 SUPPORTED_1000baseT_Full |
7069 SUPPORTED_Asym_Pause);
7072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7073 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7076 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7077 SUPPORTED_1000baseT_Full |
7081 SUPPORTED_Asym_Pause);
7084 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7085 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7088 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7089 SUPPORTED_2500baseX_Full |
7090 SUPPORTED_1000baseT_Full |
7094 SUPPORTED_Asym_Pause);
7097 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7098 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7101 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7105 SUPPORTED_Asym_Pause);
7108 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7109 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7110 bp->link_params.ext_phy_config);
7114 BNX2X_ERR("NVRAM config error. "
7115 "BAD XGXS ext_phy_config 0x%x\n",
7116 bp->link_params.ext_phy_config);
7120 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7122 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7127 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7128 bp->port.link_config);
7131 bp->link_params.phy_addr = bp->port.phy_addr;
7133 /* mask what we support according to speed_cap_mask */
7134 if (!(bp->link_params.speed_cap_mask &
7135 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7136 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7138 if (!(bp->link_params.speed_cap_mask &
7139 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7140 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7142 if (!(bp->link_params.speed_cap_mask &
7143 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7144 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7146 if (!(bp->link_params.speed_cap_mask &
7147 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7148 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7150 if (!(bp->link_params.speed_cap_mask &
7151 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7152 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7153 SUPPORTED_1000baseT_Full);
7155 if (!(bp->link_params.speed_cap_mask &
7156 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7157 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7159 if (!(bp->link_params.speed_cap_mask &
7160 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7161 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7163 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7166 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7168 bp->link_params.req_duplex = DUPLEX_FULL;
7170 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7171 case PORT_FEATURE_LINK_SPEED_AUTO:
7172 if (bp->port.supported & SUPPORTED_Autoneg) {
7173 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7174 bp->port.advertising = bp->port.supported;
7177 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7179 if ((ext_phy_type ==
7180 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7182 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7183 /* force 10G, no AN */
7184 bp->link_params.req_line_speed = SPEED_10000;
7185 bp->port.advertising =
7186 (ADVERTISED_10000baseT_Full |
7190 BNX2X_ERR("NVRAM config error. "
7191 "Invalid link_config 0x%x"
7192 " Autoneg not supported\n",
7193 bp->port.link_config);
7198 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7199 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7200 bp->link_params.req_line_speed = SPEED_10;
7201 bp->port.advertising = (ADVERTISED_10baseT_Full |
7204 BNX2X_ERR("NVRAM config error. "
7205 "Invalid link_config 0x%x"
7206 " speed_cap_mask 0x%x\n",
7207 bp->port.link_config,
7208 bp->link_params.speed_cap_mask);
7213 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7214 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7215 bp->link_params.req_line_speed = SPEED_10;
7216 bp->link_params.req_duplex = DUPLEX_HALF;
7217 bp->port.advertising = (ADVERTISED_10baseT_Half |
7220 BNX2X_ERR("NVRAM config error. "
7221 "Invalid link_config 0x%x"
7222 " speed_cap_mask 0x%x\n",
7223 bp->port.link_config,
7224 bp->link_params.speed_cap_mask);
7229 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7230 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7231 bp->link_params.req_line_speed = SPEED_100;
7232 bp->port.advertising = (ADVERTISED_100baseT_Full |
7235 BNX2X_ERR("NVRAM config error. "
7236 "Invalid link_config 0x%x"
7237 " speed_cap_mask 0x%x\n",
7238 bp->port.link_config,
7239 bp->link_params.speed_cap_mask);
7244 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7245 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7246 bp->link_params.req_line_speed = SPEED_100;
7247 bp->link_params.req_duplex = DUPLEX_HALF;
7248 bp->port.advertising = (ADVERTISED_100baseT_Half |
7251 BNX2X_ERR("NVRAM config error. "
7252 "Invalid link_config 0x%x"
7253 " speed_cap_mask 0x%x\n",
7254 bp->port.link_config,
7255 bp->link_params.speed_cap_mask);
7260 case PORT_FEATURE_LINK_SPEED_1G:
7261 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7262 bp->link_params.req_line_speed = SPEED_1000;
7263 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7266 BNX2X_ERR("NVRAM config error. "
7267 "Invalid link_config 0x%x"
7268 " speed_cap_mask 0x%x\n",
7269 bp->port.link_config,
7270 bp->link_params.speed_cap_mask);
7275 case PORT_FEATURE_LINK_SPEED_2_5G:
7276 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7277 bp->link_params.req_line_speed = SPEED_2500;
7278 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7281 BNX2X_ERR("NVRAM config error. "
7282 "Invalid link_config 0x%x"
7283 " speed_cap_mask 0x%x\n",
7284 bp->port.link_config,
7285 bp->link_params.speed_cap_mask);
7290 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7291 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7292 case PORT_FEATURE_LINK_SPEED_10G_KR:
7293 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7294 bp->link_params.req_line_speed = SPEED_10000;
7295 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7298 BNX2X_ERR("NVRAM config error. "
7299 "Invalid link_config 0x%x"
7300 " speed_cap_mask 0x%x\n",
7301 bp->port.link_config,
7302 bp->link_params.speed_cap_mask);
7308 BNX2X_ERR("NVRAM config error. "
7309 "BAD link speed link_config 0x%x\n",
7310 bp->port.link_config);
7311 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7312 bp->port.advertising = bp->port.supported;
7316 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7317 PORT_FEATURE_FLOW_CONTROL_MASK);
7318 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7319 !(bp->port.supported & SUPPORTED_Autoneg))
7320 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7322 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7323 " advertising 0x%x\n",
7324 bp->link_params.req_line_speed,
7325 bp->link_params.req_duplex,
7326 bp->link_params.req_flow_ctrl, bp->port.advertising);
7329 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7331 int port = BP_PORT(bp);
7334 bp->link_params.bp = bp;
7335 bp->link_params.port = port;
7337 bp->link_params.serdes_config =
7338 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7339 bp->link_params.lane_config =
7340 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7341 bp->link_params.ext_phy_config =
7343 dev_info.port_hw_config[port].external_phy_config);
7344 bp->link_params.speed_cap_mask =
7346 dev_info.port_hw_config[port].speed_capability_mask);
7348 bp->port.link_config =
7349 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7351 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7352 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7353 " link_config 0x%08x\n",
7354 bp->link_params.serdes_config,
7355 bp->link_params.lane_config,
7356 bp->link_params.ext_phy_config,
7357 bp->link_params.speed_cap_mask, bp->port.link_config);
7359 bp->link_params.switch_cfg = (bp->port.link_config &
7360 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7361 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7363 bnx2x_link_settings_requested(bp);
7365 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7366 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7367 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7368 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7369 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7370 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7371 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7372 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7373 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7374 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
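/* Editor's example (illustrative values): mac_upper 0x0010 and
 * mac_lower 0x18aabbcc yield dev_addr 00:10:18:aa:bb:cc -- the upper
 * word supplies bytes 0-1 and the lower dword bytes 2-5, matching the
 * shifts above.
 */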
7377 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7379 int func = BP_FUNC(bp);
7383 bnx2x_get_common_hwinfo(bp);
7387 if (CHIP_IS_E1H(bp)) {
7389 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7392 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7393 FUNC_MF_CFG_E1HOV_TAG_MASK);
7394 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7398 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7400 func, bp->e1hov, bp->e1hov);
7402 BNX2X_DEV_INFO("Single function mode\n");
7404 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7405 " aborting\n", func);
7411 if (!BP_NOMCP(bp)) {
7412 bnx2x_get_port_hwinfo(bp);
7414 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7415 DRV_MSG_SEQ_NUMBER_MASK);
7416 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7420 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7421 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7422 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7423 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7424 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7425 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7426 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7427 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7428 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7429 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7430 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7432 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7440 /* only supposed to happen on emulation/FPGA */
7441 BNX2X_ERR("warning: random MAC workaround active\n");
7442 random_ether_addr(bp->dev->dev_addr);
7443 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7449 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7451 int func = BP_FUNC(bp);
7454 /* Disable interrupt handling until HW is initialized */
7455 atomic_set(&bp->intr_sem, 1);
7457 mutex_init(&bp->port.phy_mutex);
7459 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7460 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7462 rc = bnx2x_get_hwinfo(bp);
7464 /* need to reset chip if undi was active */
7466 bnx2x_undi_unload(bp);
7468 if (CHIP_REV_IS_FPGA(bp))
7469 printk(KERN_ERR PFX "FPGA detected\n");
7471 if (BP_NOMCP(bp) && (func == 0))
7473 "MCP disabled, must load devices in order!\n");
7477 bp->flags &= ~TPA_ENABLE_FLAG;
7478 bp->dev->features &= ~NETIF_F_LRO;
7480 bp->flags |= TPA_ENABLE_FLAG;
7481 bp->dev->features |= NETIF_F_LRO;
7485 bp->tx_ring_size = MAX_TX_AVAIL;
7486 bp->rx_ring_size = MAX_RX_AVAIL;
7494 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7495 bp->current_interval = (poll ? poll : bp->timer_interval);
7497 init_timer(&bp->timer);
7498 bp->timer.expires = jiffies + bp->current_interval;
7499 bp->timer.data = (unsigned long) bp;
7500 bp->timer.function = bnx2x_timer;
7506 * ethtool service functions
7509 /* All ethtool functions called with rtnl_lock */
7511 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7513 struct bnx2x *bp = netdev_priv(dev);
7515 cmd->supported = bp->port.supported;
7516 cmd->advertising = bp->port.advertising;
7518 if (netif_carrier_ok(dev)) {
7519 cmd->speed = bp->link_vars.line_speed;
7520 cmd->duplex = bp->link_vars.duplex;
7522 cmd->speed = bp->link_params.req_line_speed;
7523 cmd->duplex = bp->link_params.req_duplex;
7528 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7529 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7530 if (vn_max_rate < cmd->speed)
7531 cmd->speed = vn_max_rate;
7534 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7536 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7538 switch (ext_phy_type) {
7539 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7540 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7541 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7543 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7544 cmd->port = PORT_FIBRE;
7547 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7548 cmd->port = PORT_TP;
7551 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7552 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7553 bp->link_params.ext_phy_config);
7557 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7558 bp->link_params.ext_phy_config);
7562 cmd->port = PORT_TP;
7564 cmd->phy_address = bp->port.phy_addr;
7565 cmd->transceiver = XCVR_INTERNAL;
7567 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7568 cmd->autoneg = AUTONEG_ENABLE;
7570 cmd->autoneg = AUTONEG_DISABLE;
7575 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7576 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7577 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7578 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7579 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7580 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7581 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7586 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7588 struct bnx2x *bp = netdev_priv(dev);
7594 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7595 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7596 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7597 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7598 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7599 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7600 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7602 if (cmd->autoneg == AUTONEG_ENABLE) {
7603 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7604 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7608 /* advertise the requested speed and duplex if supported */
7609 cmd->advertising &= bp->port.supported;
7611 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7612 bp->link_params.req_duplex = DUPLEX_FULL;
7613 bp->port.advertising |= (ADVERTISED_Autoneg |
7616 } else { /* forced speed */
7617 /* advertise the requested speed and duplex if supported */
7618 switch (cmd->speed) {
7620 if (cmd->duplex == DUPLEX_FULL) {
7621 if (!(bp->port.supported &
7622 SUPPORTED_10baseT_Full)) {
7624 "10M full not supported\n");
7628 advertising = (ADVERTISED_10baseT_Full |
7631 if (!(bp->port.supported &
7632 SUPPORTED_10baseT_Half)) {
7634 "10M half not supported\n");
7638 advertising = (ADVERTISED_10baseT_Half |
7644 if (cmd->duplex == DUPLEX_FULL) {
7645 if (!(bp->port.supported &
7646 SUPPORTED_100baseT_Full)) {
7648 "100M full not supported\n");
7652 advertising = (ADVERTISED_100baseT_Full |
7655 if (!(bp->port.supported &
7656 SUPPORTED_100baseT_Half)) {
7658 "100M half not supported\n");
7662 advertising = (ADVERTISED_100baseT_Half |
7668 if (cmd->duplex != DUPLEX_FULL) {
7669 DP(NETIF_MSG_LINK, "1G half not supported\n");
7673 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7674 DP(NETIF_MSG_LINK, "1G full not supported\n");
7678 advertising = (ADVERTISED_1000baseT_Full |
7683 if (cmd->duplex != DUPLEX_FULL) {
7685 "2.5G half not supported\n");
7689 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7691 "2.5G full not supported\n");
7695 advertising = (ADVERTISED_2500baseX_Full |
7700 if (cmd->duplex != DUPLEX_FULL) {
7701 DP(NETIF_MSG_LINK, "10G half not supported\n");
7705 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7706 DP(NETIF_MSG_LINK, "10G full not supported\n");
7710 advertising = (ADVERTISED_10000baseT_Full |
7715 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7719 bp->link_params.req_line_speed = cmd->speed;
7720 bp->link_params.req_duplex = cmd->duplex;
7721 bp->port.advertising = advertising;
7724 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7725 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7726 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7727 bp->port.advertising);
7729 if (netif_running(dev)) {
7730 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7737 #define PHY_FW_VER_LEN 10
7739 static void bnx2x_get_drvinfo(struct net_device *dev,
7740 struct ethtool_drvinfo *info)
7742 struct bnx2x *bp = netdev_priv(dev);
7743 char phy_fw_ver[PHY_FW_VER_LEN];
7745 strcpy(info->driver, DRV_MODULE_NAME);
7746 strcpy(info->version, DRV_MODULE_VERSION);
7748 phy_fw_ver[0] = '\0';
7750 bnx2x_acquire_phy_lock(bp);
7751 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7752 (bp->state != BNX2X_STATE_CLOSED),
7753 phy_fw_ver, PHY_FW_VER_LEN);
7754 bnx2x_release_phy_lock(bp);
7757 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7758 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7759 BCM_5710_FW_REVISION_VERSION,
7760 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7761 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7762 strcpy(info->bus_info, pci_name(bp->pdev));
7763 info->n_stats = BNX2X_NUM_STATS;
7764 info->testinfo_len = BNX2X_NUM_TESTS;
7765 info->eedump_len = bp->common.flash_size;
7766 info->regdump_len = 0;
7769 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7771 struct bnx2x *bp = netdev_priv(dev);
7773 if (bp->flags & NO_WOL_FLAG) {
7777 wol->supported = WAKE_MAGIC;
7779 wol->wolopts = WAKE_MAGIC;
7783 memset(&wol->sopass, 0, sizeof(wol->sopass));
7786 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7788 struct bnx2x *bp = netdev_priv(dev);
7790 if (wol->wolopts & ~WAKE_MAGIC)
7793 if (wol->wolopts & WAKE_MAGIC) {
7794 if (bp->flags & NO_WOL_FLAG)
7804 static u32 bnx2x_get_msglevel(struct net_device *dev)
7806 struct bnx2x *bp = netdev_priv(dev);
7808 return bp->msglevel;
7811 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7813 struct bnx2x *bp = netdev_priv(dev);
7815 if (capable(CAP_NET_ADMIN))
7816 bp->msglevel = level;
7819 static int bnx2x_nway_reset(struct net_device *dev)
7821 struct bnx2x *bp = netdev_priv(dev);
7826 if (netif_running(dev)) {
7827 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7834 static int bnx2x_get_eeprom_len(struct net_device *dev)
7836 struct bnx2x *bp = netdev_priv(dev);
7838 return bp->common.flash_size;
7841 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7843 int port = BP_PORT(bp);
7847 /* adjust timeout for emulation/FPGA */
7848 count = NVRAM_TIMEOUT_COUNT;
7849 if (CHIP_REV_IS_SLOW(bp))
7852 /* request access to nvram interface */
7853 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7854 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7856 for (i = 0; i < count*10; i++) {
7857 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7858 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7864 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7865 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7872 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7874 int port = BP_PORT(bp);
7878 /* adjust timeout for emulation/FPGA */
7879 count = NVRAM_TIMEOUT_COUNT;
7880 if (CHIP_REV_IS_SLOW(bp))
7883 /* relinquish nvram interface */
7884 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7885 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7887 for (i = 0; i < count*10; i++) {
7888 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7889 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7895 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7896 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7903 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7907 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7909 /* enable both bits, even on read */
7910 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7911 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7912 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7915 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7919 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7921 /* disable both bits, even after read */
7922 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7923 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7924 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7927 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7933 /* build the command word */
7934 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7936 /* need to clear DONE bit separately */
7937 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7939 /* address of the NVRAM to read from */
7940 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7941 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7943 /* issue a read command */
7944 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7946 /* adjust timeout for emulation/FPGA */
7947 count = NVRAM_TIMEOUT_COUNT;
7948 if (CHIP_REV_IS_SLOW(bp))
7951 /* wait for completion */
7954 for (i = 0; i < count; i++) {
7956 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7958 if (val & MCPR_NVM_COMMAND_DONE) {
7959 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7960 /* we read nvram data in cpu order,
7961 * but ethtool sees it as an array of bytes;
7962 * converting to big-endian will do the work */
7963 val = cpu_to_be32(val);
7973 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7980 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7982 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7987 if (offset + buf_size > bp->common.flash_size) {
7988 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7989 " buf_size (0x%x) > flash_size (0x%x)\n",
7990 offset, buf_size, bp->common.flash_size);
7994 /* request access to nvram interface */
7995 rc = bnx2x_acquire_nvram_lock(bp);
7999 /* enable access to nvram interface */
8000 bnx2x_enable_nvram_access(bp);
8002 /* read the first word(s) */
8003 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8004 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8005 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8006 memcpy(ret_buf, &val, 4);
8008 /* advance to the next dword */
8009 offset += sizeof(u32);
8010 ret_buf += sizeof(u32);
8011 buf_size -= sizeof(u32);
8016 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8017 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8018 memcpy(ret_buf, &val, 4);
8021 /* disable access to nvram interface */
8022 bnx2x_disable_nvram_access(bp);
8023 bnx2x_release_nvram_lock(bp);
8028 static int bnx2x_get_eeprom(struct net_device *dev,
8029 struct ethtool_eeprom *eeprom, u8 *eebuf)
8031 struct bnx2x *bp = netdev_priv(dev);
8034 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8035 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8036 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8037 eeprom->len, eeprom->len);
8039 /* parameters already validated in ethtool_get_eeprom */
8041 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8046 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8051 /* build the command word */
8052 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8054 /* need to clear DONE bit separately */
8055 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8057 /* write the data */
8058 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8060 /* address of the NVRAM to write to */
8061 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8062 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8064 /* issue the write command */
8065 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8067 /* adjust timeout for emulation/FPGA */
8068 count = NVRAM_TIMEOUT_COUNT;
8069 if (CHIP_REV_IS_SLOW(bp))
8072 /* wait for completion */
8074 for (i = 0; i < count; i++) {
8076 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8077 if (val & MCPR_NVM_COMMAND_DONE) {
8086 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
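/* Editor's sketch (hypothetical helper): BYTE_OFFSET() converts a byte
 * offset into a bit shift within its aligned dword, e.g. offset 0x102
 * -> 8 * 2 = 16.  bnx2x_nvram_write1() below uses the same shift for a
 * read-modify-write of a single byte, along these lines:
 */
static inline u32 example_nvram_merge_byte(u32 dword, u32 offset, u8 byte)
{
	dword &= ~(0xff << BYTE_OFFSET(offset));	/* clear the old byte */
	dword |= ((u32)byte << BYTE_OFFSET(offset));	/* splice in the new */
	return dword;
}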
8088 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8096 if (offset + buf_size > bp->common.flash_size) {
8097 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8098 " buf_size (0x%x) > flash_size (0x%x)\n",
8099 offset, buf_size, bp->common.flash_size);
8103 /* request access to nvram interface */
8104 rc = bnx2x_acquire_nvram_lock(bp);
8108 /* enable access to nvram interface */
8109 bnx2x_enable_nvram_access(bp);
8111 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8112 align_offset = (offset & ~0x03);
8113 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8116 val &= ~(0xff << BYTE_OFFSET(offset));
8117 val |= (*data_buf << BYTE_OFFSET(offset));
8119 /* nvram data is returned as an array of bytes;
8120 * convert it back to cpu order */
8121 val = be32_to_cpu(val);
8123 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8127 /* disable access to nvram interface */
8128 bnx2x_disable_nvram_access(bp);
8129 bnx2x_release_nvram_lock(bp);
8134 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8142 if (buf_size == 1) /* ethtool */
8143 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8145 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8147 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8152 if (offset + buf_size > bp->common.flash_size) {
8153 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8154 " buf_size (0x%x) > flash_size (0x%x)\n",
8155 offset, buf_size, bp->common.flash_size);
8159 /* request access to nvram interface */
8160 rc = bnx2x_acquire_nvram_lock(bp);
8164 /* enable access to nvram interface */
8165 bnx2x_enable_nvram_access(bp);
8168 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8169 while ((written_so_far < buf_size) && (rc == 0)) {
8170 if (written_so_far == (buf_size - sizeof(u32)))
8171 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8172 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8173 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8174 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8175 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
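/* Editor's note: LAST is raised on the final dword of the transfer and
 * on the last dword of each NVRAM page, while FIRST is re-raised at the
 * next page boundary, so the flash sees one complete command sequence
 * per page.
 */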
8177 memcpy(&val, data_buf, 4);
8179 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8181 /* advance to the next dword */
8182 offset += sizeof(u32);
8183 data_buf += sizeof(u32);
8184 written_so_far += sizeof(u32);
8188 /* disable access to nvram interface */
8189 bnx2x_disable_nvram_access(bp);
8190 bnx2x_release_nvram_lock(bp);
8195 static int bnx2x_set_eeprom(struct net_device *dev,
8196 struct ethtool_eeprom *eeprom, u8 *eebuf)
8198 struct bnx2x *bp = netdev_priv(dev);
8201 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8202 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8203 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8204 eeprom->len, eeprom->len);
8206 /* parameters already validated in ethtool_set_eeprom */
8208 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8209 if (eeprom->magic == 0x00504859)
8212 bnx2x_acquire_phy_lock(bp);
8213 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8214 bp->link_params.ext_phy_config,
8215 (bp->state != BNX2X_STATE_CLOSED),
8216 eebuf, eeprom->len);
8217 if ((bp->state == BNX2X_STATE_OPEN) ||
8218 (bp->state == BNX2X_STATE_DISABLED)) {
8219 rc |= bnx2x_link_reset(&bp->link_params,
8221 rc |= bnx2x_phy_init(&bp->link_params,
8224 bnx2x_release_phy_lock(bp);
8226 } else /* Only the PMF can access the PHY */
8229 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8234 static int bnx2x_get_coalesce(struct net_device *dev,
8235 struct ethtool_coalesce *coal)
8237 struct bnx2x *bp = netdev_priv(dev);
8239 memset(coal, 0, sizeof(struct ethtool_coalesce));
8241 coal->rx_coalesce_usecs = bp->rx_ticks;
8242 coal->tx_coalesce_usecs = bp->tx_ticks;
8247 static int bnx2x_set_coalesce(struct net_device *dev,
8248 struct ethtool_coalesce *coal)
8250 struct bnx2x *bp = netdev_priv(dev);
8252 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8253 if (bp->rx_ticks > 3000)
8254 bp->rx_ticks = 3000;
8256 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8257 if (bp->tx_ticks > 0x3000)
8258 bp->tx_ticks = 0x3000;
8260 if (netif_running(dev))
8261 bnx2x_update_coalesce(bp);
8266 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8268 struct bnx2x *bp = netdev_priv(dev);
8272 if (data & ETH_FLAG_LRO) {
8273 if (!(dev->features & NETIF_F_LRO)) {
8274 dev->features |= NETIF_F_LRO;
8275 bp->flags |= TPA_ENABLE_FLAG;
8279 } else if (dev->features & NETIF_F_LRO) {
8280 dev->features &= ~NETIF_F_LRO;
8281 bp->flags &= ~TPA_ENABLE_FLAG;
8285 if (changed && netif_running(dev)) {
8286 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8287 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8293 static void bnx2x_get_ringparam(struct net_device *dev,
8294 struct ethtool_ringparam *ering)
8296 struct bnx2x *bp = netdev_priv(dev);
8298 ering->rx_max_pending = MAX_RX_AVAIL;
8299 ering->rx_mini_max_pending = 0;
8300 ering->rx_jumbo_max_pending = 0;
8302 ering->rx_pending = bp->rx_ring_size;
8303 ering->rx_mini_pending = 0;
8304 ering->rx_jumbo_pending = 0;
8306 ering->tx_max_pending = MAX_TX_AVAIL;
8307 ering->tx_pending = bp->tx_ring_size;
8310 static int bnx2x_set_ringparam(struct net_device *dev,
8311 struct ethtool_ringparam *ering)
8313 struct bnx2x *bp = netdev_priv(dev);
8316 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8317 (ering->tx_pending > MAX_TX_AVAIL) ||
8318 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
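/* Editor's note (assumption about the bound above): tx_pending must
 * exceed MAX_SKB_FRAGS + 4 so that even a maximally fragmented skb --
 * its data BDs plus a few header/parsing BDs -- cannot consume the
 * whole Tx ring on its own.
 */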
8321 bp->rx_ring_size = ering->rx_pending;
8322 bp->tx_ring_size = ering->tx_pending;
8324 if (netif_running(dev)) {
8325 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8326 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8332 static void bnx2x_get_pauseparam(struct net_device *dev,
8333 struct ethtool_pauseparam *epause)
8335 struct bnx2x *bp = netdev_priv(dev);
8337 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8338 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8340 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8342 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8345 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8346 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8347 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8350 static int bnx2x_set_pauseparam(struct net_device *dev,
8351 struct ethtool_pauseparam *epause)
8353 struct bnx2x *bp = netdev_priv(dev);
8358 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8359 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8360 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8362 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8364 if (epause->rx_pause)
8365 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8367 if (epause->tx_pause)
8368 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8370 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8371 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8373 if (epause->autoneg) {
8374 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8375 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8379 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8380 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8384 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8386 if (netif_running(dev)) {
8387 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8394 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8396 struct bnx2x *bp = netdev_priv(dev);
8401 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8403 struct bnx2x *bp = netdev_priv(dev);
8409 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8412 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8413 dev->features |= NETIF_F_TSO6;
8415 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8416 dev->features &= ~NETIF_F_TSO6;
8422 static const struct {
8423 char string[ETH_GSTRING_LEN];
8424 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8425 { "register_test (offline)" },
8426 { "memory_test (offline)" },
8427 { "loopback_test (offline)" },
8428 { "nvram_test (online)" },
8429 { "interrupt_test (online)" },
8430 { "link_test (online)" },
8431 { "idle check (online)" },
8432 { "MC errors (online)" }
8435 static int bnx2x_self_test_count(struct net_device *dev)
8437 return BNX2X_NUM_TESTS;
8440 static int bnx2x_test_registers(struct bnx2x *bp)
8442 int idx, i, rc = -ENODEV;
8444 int port = BP_PORT(bp);
8445 static const struct {
8450 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8451 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8452 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8453 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8454 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8455 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8456 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8457 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8458 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8459 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8460 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8461 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8462 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8463 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8464 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8465 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8466 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8467 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8468 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8469 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8470 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8471 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8472 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8473 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8474 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8475 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8476 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8477 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8478 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8479 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8480 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8481 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8482 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8483 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8484 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8485 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8486 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8487 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8489 { 0xffffffff, 0, 0x00000000 }
8492 if (!netif_running(bp->dev))
8495 /* Repeat the test twice:
8496 first by writing 0x00000000, then by writing 0xffffffff */
8497 for (idx = 0; idx < 2; idx++) {
8504 wr_val = 0xffffffff;
8508 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8509 u32 offset, mask, save_val, val;
8511 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8512 mask = reg_tbl[i].mask;
8514 save_val = REG_RD(bp, offset);
8516 REG_WR(bp, offset, wr_val);
8517 val = REG_RD(bp, offset);
8519 /* Restore the original register's value */
8520 REG_WR(bp, offset, save_val);
8522 /* verify that the value read back is as expected */
8523 if ((val & mask) != (wr_val & mask))
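/* Editor's note: the per-register mask hides read-only and
 * self-clearing bits, so the comparison above fails only when a
 * writable bit did not stick.  E.g. with mask 0x000003ff, writing
 * 0xffffffff and reading back 0x000003ff still passes, since
 * (val & mask) == (wr_val & mask) == 0x3ff.
 */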
8534 static int bnx2x_test_memory(struct bnx2x *bp)
8536 int i, j, rc = -ENODEV;
8538 static const struct {
8542 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8543 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8544 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8545 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8546 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8547 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8548 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8552 static const struct {
8558 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8559 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8560 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8561 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8562 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8563 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8565 { NULL, 0xffffffff, 0, 0 }
8568 if (!netif_running(bp->dev))
8571 /* Go through all the memories */
8572 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8573 for (j = 0; j < mem_tbl[i].size; j++)
8574 REG_RD(bp, mem_tbl[i].offset + j*4);
8576 /* Check the parity status */
8577 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8578 val = REG_RD(bp, prty_tbl[i].offset);
8579 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8580 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8582 "%s is 0x%x\n", prty_tbl[i].name, val);
8593 static void bnx2x_netif_start(struct bnx2x *bp)
8597 if (atomic_dec_and_test(&bp->intr_sem)) {
8598 if (netif_running(bp->dev)) {
8599 bnx2x_int_enable(bp);
8600 for_each_queue(bp, i)
8601 napi_enable(&bnx2x_fp(bp, i, napi));
8602 if (bp->state == BNX2X_STATE_OPEN)
8603 netif_wake_queue(bp->dev);
8608 static void bnx2x_netif_stop(struct bnx2x *bp)
8612 if (netif_running(bp->dev)) {
8613 netif_tx_disable(bp->dev);
8614 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8615 for_each_queue(bp, i)
8616 napi_disable(&bnx2x_fp(bp, i, napi));
8618 bnx2x_int_disable_sync(bp);
8621 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8626 while (bnx2x_link_test(bp) && cnt--)
8630 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8632 unsigned int pkt_size, num_pkts, i;
8633 struct sk_buff *skb;
8634 unsigned char *packet;
8635 struct bnx2x_fastpath *fp = &bp->fp[0];
8636 u16 tx_start_idx, tx_idx;
8637 u16 rx_start_idx, rx_idx;
8639 struct sw_tx_bd *tx_buf;
8640 struct eth_tx_bd *tx_bd;
8642 union eth_rx_cqe *cqe;
8644 struct sw_rx_bd *rx_buf;
8648 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8649 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8650 bnx2x_acquire_phy_lock(bp);
8651 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8652 bnx2x_release_phy_lock(bp);
8654 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8655 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8656 bnx2x_acquire_phy_lock(bp);
8657 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8658 bnx2x_release_phy_lock(bp);
8659 /* wait until link state is restored */
8660 bnx2x_wait_for_link(bp, link_up);
8666 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8669 goto test_loopback_exit;
8671 packet = skb_put(skb, pkt_size);
8672 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8673 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8674 for (i = ETH_HLEN; i < pkt_size; i++)
8675 packet[i] = (unsigned char) (i & 0xff);
8678 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8679 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8681 pkt_prod = fp->tx_pkt_prod++;
8682 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8683 tx_buf->first_bd = fp->tx_bd_prod;
8686 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8687 mapping = pci_map_single(bp->pdev, skb->data,
8688 skb_headlen(skb), PCI_DMA_TODEVICE);
8689 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8690 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8691 tx_bd->nbd = cpu_to_le16(1);
8692 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8693 tx_bd->vlan = cpu_to_le16(pkt_prod);
8694 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8695 ETH_TX_BD_FLAGS_END_BD);
8696 tx_bd->general_data = ((UNICAST_ADDRESS <<
8697 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8699 fp->hw_tx_prods->bds_prod =
8700 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8701 mb(); /* FW restriction: must not reorder writing nbd and packets */
8702 fp->hw_tx_prods->packets_prod =
8703 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8704 DOORBELL(bp, FP_IDX(fp), 0);
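/* Editor's note: bds_prod and packets_prod are both advanced before the
 * doorbell is rung, with mb() enforcing the FW-mandated order cited
 * above, so the chip always fetches a consistent {packets, bds} pair.
 */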
8710 bp->dev->trans_start = jiffies;
8714 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8715 if (tx_idx != tx_start_idx + num_pkts)
8716 goto test_loopback_exit;
8718 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8719 if (rx_idx != rx_start_idx + num_pkts)
8720 goto test_loopback_exit;
8722 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8723 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8724 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8725 goto test_loopback_rx_exit;
8727 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8728 if (len != pkt_size)
8729 goto test_loopback_rx_exit;
8731 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8733 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8734 for (i = ETH_HLEN; i < pkt_size; i++)
8735 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8736 goto test_loopback_rx_exit;
8740 test_loopback_rx_exit:
8741 bp->dev->last_rx = jiffies;
8743 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8744 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8745 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8746 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8748 /* Update producers */
8749 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8751 mmiowb(); /* keep prod updates ordered */
8754 bp->link_params.loopback_mode = LOOPBACK_NONE;
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

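/* The loop above does not compare a computed CRC against a stored one:
 * each nvram_tbl[] region ends with its CRC32, and running ether_crc_le()
 * over the data together with that trailing CRC yields the fixed
 * little-endian CRC-32 residual 0xdebb20e3 for any intact region.
 * A minimal restatement of that property (illustrative helper only):
 */
static inline int bnx2x_example_nvram_region_intact(const u8 *buf, int size)
{
	/* size includes the trailing 4-byte CRC, as in nvram_tbl[] */
	return (ether_crc_le(size, buf) == CRC32_RESIDUAL);
}
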
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	config->hdr.offset = 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (bp->set_mac_pending)
			rc = -ENODEV;
	}

	return rc;
}

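/* The polling loop above gives the chip up to 10 * 10 ms = 100 ms to
 * deliver the slowpath completion that clears set_mac_pending; if the
 * flag is still set after that, the interrupt path is considered
 * broken and the self-test reports a failure.
 */
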
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
	/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
	/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
	/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}

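/* HILO_U64 above glues the two 32-bit halves of a firmware counter into
 * one 64-bit value: with hi = 0x00000001 and lo = 0x00000002 the result
 * is 0x0000000100000002 (4294967298).  A minimal sketch, assuming only
 * the HILO_U64 macro from bnx2x.h (illustrative helper, not used by
 * the driver):
 */
static inline u64 bnx2x_example_hilo(u32 hi, u32 lo)
{
	/* equivalent to (((u64)hi << 32) | lo) */
	return HILO_U64(hi, lo);
}
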
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (BNX2X_HAS_TX_WORK(fp))
		bnx2x_tx_int(fp, budget);

	if (BNX2X_HAS_RX_WORK(fp))
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}

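/* Note the ordering in bnx2x_poll(): the status block is re-checked
 * (after the rmb()) before netif_rx_complete(), so an event that lands
 * while rx/tx work was running either keeps the queue in polled mode
 * or raises a fresh interrupt once IGU_INT_ENABLE re-arms the IGU;
 * completing with work still pending would stall the ring until the
 * next unrelated interrupt.
 */
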
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

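/* Worked example for bnx2x_tx_split(), assuming a 1514-byte linear
 * buffer with 54 bytes of headers (14 ETH + 20 IP + 20 TCP): the
 * header BD keeps nbytes = 54 at the original DMA address, the new
 * data BD points at (mapping + 54) with nbytes = 1514 - 54 = 1460,
 * nbd grows by one, and no second pci_map_single() is needed because
 * both BDs share the one mapping.
 */
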
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

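/* Worked example for bnx2x_csum_fix(): with fix = 2 the supplied
 * partial checksum also covers 2 bytes in front of the transport
 * header, so csum_partial() over those bytes is subtracted back out;
 * with fix = -2 the first 2 bytes are missing and are added in.  The
 * ~csum_fold() collapses the 32-bit sum to 16 bits and swab16() gives
 * the byte order the parsing BD expects.
 */
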
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

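/* Example mapping for bnx2x_xmit_type(): a CHECKSUM_PARTIAL IPv4 TCP
 * skb yields (XMIT_CSUM_V4 | XMIT_CSUM_TCP); if it is also GSO
 * (gso_type SKB_GSO_TCPV4) then XMIT_GSO_V4 is OR'ed in as well, while
 * a packet with no checksum offload request maps to XMIT_PLAIN.
 */
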
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}

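/* Worked example for the window check above: the firmware fetches at
 * most MAX_FETCH_BD BDs per packet, so every run of wnd_size =
 * MAX_FETCH_BD - 3 consecutive frags must together carry at least one
 * full MSS.  If, say, wnd_size were 10 and lso_mss 1460, a run of ten
 * frags summing to only 1000 bytes would force skb_linearize(); if
 * every window sums to 1460 or more the skb can be sent as-is.
 */
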
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	tx_bd->general_data |= 1; /* header nbd */

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

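/* Ordering note for the doorbell sequence above (the same three-step
 * pattern is used by the loopback self-test): bds_prod must reach the
 * chip before packets_prod - hence the mb() between the two writes -
 * and only after both are updated is the doorbell rung via DOORBELL().
 */
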
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
					    config_table[i])) {
						i--; /* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
				   "%02x:%02x:%02x:%02x:%02x:%02x\n",
				   mclist->dmi_addr[0], mclist->dmi_addr[1],
				   mclist->dmi_addr[2], mclist->dmi_addr[3],
				   mclist->dmi_addr[4], mclist->dmi_addr[5]);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

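/* Worked example for the E1H multicast hash above: if crc32c_le() of a
 * multicast MAC comes out as 0xa4000000, the hash bucket is
 * (0xa4000000 >> 24) & 0xff = 164, which lands in mc_filter[164 >> 5] =
 * mc_filter[5], bit 164 & 0x1f = 4.  Each of the MC_HASH_SIZE registers
 * thus covers 32 of the 256 hash buckets.
 */
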
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc) {
		unregister_netdev(dev);
		goto init_one_exit;
	}

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_OPEN);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);