/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.17"
#define DRV_MODULE_RELDATE      "2008/08/13"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

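/* indirect register read through the same PCI config-space GRC window */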
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

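/* write len32 dwords from host memory at dma_addr to GRC address dst_addr
 * via DMAE, polling wb_comp for completion; falls back to indirect register
 * writes while DMAE is not yet ready
 */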
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

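/* read len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer via DMAE; falls back to indirect reads while DMAE is not ready
 */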
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

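/* walk the XSTORM/TSTORM/CSTORM/USTORM assert lists and print any valid
 * entries; returns the number of asserts found
 */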
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

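/* print the firmware trace from the MCP scratchpad, wrapping at the mark */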
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

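/* dump driver state: per-queue indices and the ring entries around the
 * current consumers, followed by the fw dump and the STORM assert lists
 */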
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

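/* enable HC interrupt generation (single ISR + INTA, or MSI-X) along with
 * the attention bits; on E1H also program leading/trailing edge registers
 */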
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

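/* mask interrupts and wait until all ISRs and the slowpath task are done */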
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

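/* refresh the fastpath status block indices; returns a mask of which
 * (CSTORM/USTORM) indices have changed
 */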
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

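/* reading the SIMD mask register returns (and acks) the interrupt status */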
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

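/* tx BDs still available, reserving the NUM_TX_RINGS "next page" entries */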
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

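/* reclaim completed tx packets up to the status block consumer and wake
 * the queue if it was stopped and enough BDs have become available
 */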
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

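/* allocate and DMA-map a page for the given SGE ring slot */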
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

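/* allocate and DMA-map an skb for the given rx BD ring slot */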
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

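/* start TPA aggregation: park the consumer's partial skb in the per-queue
 * pool and put the pool's spare skb on the producer BD
 */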
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

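/* end TPA aggregation: fix the IP checksum, attach the SGE pages as frags
 * and pass the skb up the stack; the pool entry gets a freshly allocated skb
 */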
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

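/* rx fastpath work: process up to budget completions, dispatching slowpath
 * events and TPA start/stop CQEs along the way
 */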
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear data
1461                                            on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                                        pad + RX_COPY_THRESH,
1480                                                        PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring
1494                          * copy small packets if mtu > 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                                  bp->rx_buf_use_size,
1524                                                  PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559                 bp->dev->last_rx = jiffies;
1560
1561 next_rx:
1562                 rx_buf->skb = NULL;
1563
1564                 bd_cons = NEXT_RX_IDX(bd_cons);
1565                 bd_prod = NEXT_RX_IDX(bd_prod);
1566                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567                 rx_pkt++;
1568 next_cqe:
1569                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1571
1572                 if (rx_pkt == budget)
1573                         break;
1574         } /* while */
1575
1576         fp->rx_bd_cons = bd_cons;
1577         fp->rx_bd_prod = bd_prod_fw;
1578         fp->rx_comp_cons = sw_comp_cons;
1579         fp->rx_comp_prod = sw_comp_prod;
1580
1581         /* Update producers */
1582         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583                              fp->rx_sge_prod);
1584         mmiowb(); /* keep prod updates ordered */
1585
1586         fp->rx_pkt += rx_pkt;
1587         fp->rx_calls++;
1588
1589         return rx_pkt;
1590 }
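/* bnx2x_rx_int() advances its indices with NEXT_RX_IDX()/NEXT_RCQ_IDX()
 * rather than a plain increment because the last slot of every ring
 * page is a "next page" pointer, not a real descriptor; the
 * MAX_RCQ_DESC_CNT test near the top compensates for the same thing.
 * A compiled-out sketch with an assumed 8-slot page (the real
 * per-page counts are larger):
 */
#if 0
#define SLOTS_PER_PAGE  8                       /* assumed, for clarity */
#define SLOT_MASK       (SLOTS_PER_PAGE - 1)    /* plays MAX_RCQ_DESC_CNT */

static u16 next_ring_idx(u16 idx)
{
        idx++;
        if ((idx & SLOT_MASK) == SLOT_MASK)
                idx++;                  /* skip the page-link element */
        return idx;
}
#endif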
1591
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1593 {
1594         struct bnx2x_fastpath *fp = fp_cookie;
1595         struct bnx2x *bp = fp->bp;
1596         struct net_device *dev = bp->dev;
1597         int index = FP_IDX(fp);
1598
1599         /* Return here if interrupt is disabled */
1600         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602                 return IRQ_HANDLED;
1603         }
1604
1605         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606            index, FP_SB_ID(fp));
1607         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1608
1609 #ifdef BNX2X_STOP_ON_ERROR
1610         if (unlikely(bp->panic))
1611                 return IRQ_HANDLED;
1612 #endif
1613
1614         prefetch(fp->rx_cons_sb);
1615         prefetch(fp->tx_cons_sb);
1616         prefetch(&fp->status_blk->c_status_block.status_block_index);
1617         prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1620
1621         return IRQ_HANDLED;
1622 }
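/* netif_rx_schedule() above only queues the NAPI context; the RX work
 * itself runs later from the poll callback, which drains the ring and
 * re-enables the status-block interrupt once it runs out of work.
 * A simplified, compiled-out sketch of that other half -- not the
 * driver's actual poll routine:
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
        struct bnx2x_fastpath *fp =
                container_of(napi, struct bnx2x_fastpath, napi);
        int work_done = bnx2x_rx_int(fp, budget);

        if (work_done < budget) {
                /* ring drained - leave polling mode and unmask */
                netif_rx_complete(fp->bp->dev, napi);
                bnx2x_ack_sb(fp->bp, FP_SB_ID(fp), USTORM_ID,
                             le16_to_cpu(fp->fp_u_idx),
                             IGU_INT_ENABLE, 1);
        }
        return work_done;
}
#endif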
1623
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625 {
1626         struct net_device *dev = dev_instance;
1627         struct bnx2x *bp = netdev_priv(dev);
1628         u16 status = bnx2x_ack_int(bp);
1629         u16 mask;
1630
1631         /* Return here if interrupt is shared and it's not for us */
1632         if (unlikely(status == 0)) {
1633                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634                 return IRQ_NONE;
1635         }
1636         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1637
1638         /* Return here if interrupt is disabled */
1639         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1640                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1641                 return IRQ_HANDLED;
1642         }
1643
1644 #ifdef BNX2X_STOP_ON_ERROR
1645         if (unlikely(bp->panic))
1646                 return IRQ_HANDLED;
1647 #endif
1648
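        /* status bit 0 belongs to the slowpath (default) status block;
           each fastpath status block owns bit (1 << (sb_id + 1)),
           which is what the 0x2 shift below computes */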
1649         mask = 0x2 << bp->fp[0].sb_id;
1650         if (status & mask) {
1651                 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653                 prefetch(fp->rx_cons_sb);
1654                 prefetch(fp->tx_cons_sb);
1655                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1659
1660                 status &= ~mask;
1661         }
1662
1663
1664         if (unlikely(status & 0x1)) {
1665                 schedule_work(&bp->sp_task);
1666
1667                 status &= ~0x1;
1668                 if (!status)
1669                         return IRQ_HANDLED;
1670         }
1671
1672         if (status)
1673                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674                    status);
1675
1676         return IRQ_HANDLED;
1677 }
1678
1679 /* end of fast path */
1680
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1682
1683 /* Link */
1684
1685 /*
1686  * General service functions
1687  */
1688
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1690 {
1691         u32 lock_status;
1692         u32 resource_bit = (1 << resource);
1693         int func = BP_FUNC(bp);
1694         u32 hw_lock_control_reg;
1695         int cnt;
1696
1697         /* Validating that the resource is within range */
1698         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699                 DP(NETIF_MSG_HW,
1700                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702                 return -EINVAL;
1703         }
1704
1705         if (func <= 5) {
1706                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707         } else {
1708                 hw_lock_control_reg =
1709                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710         }
1711
1712         /* Validating that the resource is not already taken */
1713         lock_status = REG_RD(bp, hw_lock_control_reg);
1714         if (lock_status & resource_bit) {
1715                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1716                    lock_status, resource_bit);
1717                 return -EEXIST;
1718         }
1719
1720         /* Try for 1 second every 5ms */
1721         for (cnt = 0; cnt < 200; cnt++) {
1722                 /* Try to acquire the lock */
1723                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724                 lock_status = REG_RD(bp, hw_lock_control_reg);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
1733
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         int func = BP_FUNC(bp);
1739         u32 hw_lock_control_reg;
1740
1741         /* Validating that the resource is within range */
1742         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743                 DP(NETIF_MSG_HW,
1744                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746                 return -EINVAL;
1747         }
1748
1749         if (func <= 5) {
1750                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751         } else {
1752                 hw_lock_control_reg =
1753                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754         }
1755
1756         /* Validating that the resource is currently taken */
1757         lock_status = REG_RD(bp, hw_lock_control_reg);
1758         if (!(lock_status & resource_bit)) {
1759                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1760                    lock_status, resource_bit);
1761                 return -EFAULT;
1762         }
1763
1764         REG_WR(bp, hw_lock_control_reg, resource_bit);
1765         return 0;
1766 }
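/* Note on the register pair used by the two functions above: writing a
 * resource bit to (hw_lock_control_reg + 4) requests the lock, the bit
 * reads back in hw_lock_control_reg only if the grab succeeded, and
 * writing the bit to hw_lock_control_reg itself drops it again.  The
 * canonical pairing, as used by bnx2x_set_gpio() below:
 */
#if 0
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* ... touch the shared register ... */
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
#endif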
1767
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1770 {
1771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1772
1773         mutex_lock(&bp->port.phy_mutex);
1774
1775         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778 }
1779
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1781 {
1782         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1787
1788         mutex_unlock(&bp->port.phy_mutex);
1789 }
1790
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1792 {
1793         /* The GPIO should be swapped if swap register is set and active */
1794         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1796         int gpio_shift = gpio_num +
1797                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798         u32 gpio_mask = (1 << gpio_shift);
1799         u32 gpio_reg;
1800
1801         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803                 return -EINVAL;
1804         }
1805
1806         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807         /* read GPIO and mask off all bits except the float bits */
1808         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1809
1810         switch (mode) {
1811         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set CLR */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821                    gpio_num, gpio_shift);
1822                 /* clear FLOAT and set SET */
1823                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825                 break;
1826
1827         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829                    gpio_num, gpio_shift);
1830                 /* set FLOAT */
1831                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832                 break;
1833
1834         default:
1835                 break;
1836         }
1837
1838         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1840
1841         return 0;
1842 }
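/* A typical call, matching the fan-failure handler later in this
 * file -- drive GPIO 1 low on the given port:
 */
#if 0
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
#endif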
1843
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1845 {
1846         u32 spio_mask = (1 << spio_num);
1847         u32 spio_reg;
1848
1849         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850             (spio_num > MISC_REGISTERS_SPIO_7)) {
1851                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856         /* read SPIO and mask off all bits except the float bits */
1857         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862                 /* clear FLOAT and set CLR */
1863                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865                 break;
1866
1867         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869                 /* clear FLOAT and set SET */
1870                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872                 break;
1873
1874         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876                 /* set FLOAT */
1877                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878                 break;
1879
1880         default:
1881                 break;
1882         }
1883
1884         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1886
1887         return 0;
1888 }
1889
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1891 {
1892         switch (bp->link_vars.ieee_fc) {
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895                                           ADVERTISED_Pause);
1896                 break;
1897         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899                                          ADVERTISED_Pause);
1900                 break;
1901         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903                 break;
1904         default:
1905                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906                                           ADVERTISED_Pause);
1907                 break;
1908         }
1909 }
1910
1911 static void bnx2x_link_report(struct bnx2x *bp)
1912 {
1913         if (bp->link_vars.link_up) {
1914                 if (bp->state == BNX2X_STATE_OPEN)
1915                         netif_carrier_on(bp->dev);
1916                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1917
1918                 printk("%d Mbps ", bp->link_vars.line_speed);
1919
1920                 if (bp->link_vars.duplex == DUPLEX_FULL)
1921                         printk("full duplex");
1922                 else
1923                         printk("half duplex");
1924
1925                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927                                 printk(", receive ");
1928                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929                                         printk("& transmit ");
1930                         } else {
1931                                 printk(", transmit ");
1932                         }
1933                         printk("flow control ON");
1934                 }
1935                 printk("\n");
1936
1937         } else { /* link_down */
1938                 netif_carrier_off(bp->dev);
1939                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1940         }
1941 }
1942
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944 {
1945         if (!BP_NOMCP(bp)) {
1946                 u8 rc;
1947
1948                 /* Initialize link parameters structure variables */
1949                 /* It is recommended to turn off RX FC for jumbo frames
1950                    for better performance */
1951                 if (IS_E1HMF(bp))
1952                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1953                 else if (bp->dev->mtu > 5000)
1954                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1955                 else
1956                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1957
1958                 bnx2x_acquire_phy_lock(bp);
1959                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1960                 bnx2x_release_phy_lock(bp);
1961
1962                 if (bp->link_vars.link_up)
1963                         bnx2x_link_report(bp);
1964
1965                 bnx2x_calc_fc_adv(bp);
1966
1967                 return rc;
1968         }
1969         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1970         return -EINVAL;
1971 }
1972
1973 static void bnx2x_link_set(struct bnx2x *bp)
1974 {
1975         if (!BP_NOMCP(bp)) {
1976                 bnx2x_acquire_phy_lock(bp);
1977                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1978                 bnx2x_release_phy_lock(bp);
1979
1980                 bnx2x_calc_fc_adv(bp);
1981         } else
1982                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1983 }
1984
1985 static void bnx2x__link_reset(struct bnx2x *bp)
1986 {
1987         if (!BP_NOMCP(bp)) {
1988                 bnx2x_acquire_phy_lock(bp);
1989                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1990                 bnx2x_release_phy_lock(bp);
1991         } else
1992                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1993 }
1994
1995 static u8 bnx2x_link_test(struct bnx2x *bp)
1996 {
1997         u8 rc;
1998
1999         bnx2x_acquire_phy_lock(bp);
2000         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2001         bnx2x_release_phy_lock(bp);
2002
2003         return rc;
2004 }
2005
2006 /* Calculates the sum of vn_min_rates.
2007    It's needed for further normalizing of the min_rates.
2008
2009    Returns:
2010      sum of vn_min_rates
2011        or
2012      0 - if all the min_rates are 0.
2013      In the latter case the fairness algorithm should be deactivated.
2014      If not all min_rates are zero then those that are zero will
2015      be set to 1.
2016  */
2017 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2018 {
2019         int i, port = BP_PORT(bp);
2020         u32 wsum = 0;
2021         int all_zero = 1;
2022
2023         for (i = 0; i < E1HVN_MAX; i++) {
2024                 u32 vn_cfg =
2025                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2026                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2027                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2028                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2029                         /* If min rate is zero - set it to 1 */
2030                         if (!vn_min_rate)
2031                                 vn_min_rate = DEF_MIN_RATE;
2032                         else
2033                                 all_zero = 0;
2034
2035                         wsum += vn_min_rate;
2036                 }
2037         }
2038
2039         /* ... only if all min rates are zeros - disable FAIRNESS */
2040         if (all_zero)
2041                 return 0;
2042
2043         return wsum;
2044 }
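/* Worked example (values illustrative): with four vns whose min rates
 * come out as 0, 2500, 0 and 7500 and none of them hidden, the two
 * zero rates are bumped to DEF_MIN_RATE each, so the function returns
 * 2*DEF_MIN_RATE + 10000.  Only if all four rates were zero would it
 * return 0 and fairness be disabled. */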
2045
2046 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2047                                    int en_fness,
2048                                    u16 port_rate,
2049                                    struct cmng_struct_per_port *m_cmng_port)
2050 {
2051         u32 r_param = port_rate / 8;
2052         int port = BP_PORT(bp);
2053         int i;
2054
2055         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2056
2057         /* Enable minmax only if we are in e1hmf mode */
2058         if (IS_E1HMF(bp)) {
2059                 u32 fair_periodic_timeout_usec;
2060                 u32 t_fair;
2061
2062                 /* Enable rate shaping and fairness */
2063                 m_cmng_port->flags.cmng_vn_enable = 1;
2064                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2065                 m_cmng_port->flags.rate_shaping_enable = 1;
2066
2067                 if (!en_fness)
2068                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2069                            "  fairness will be disabled\n");
2070
2071                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2072                 m_cmng_port->rs_vars.rs_periodic_timeout =
2073                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2074
2075                 /* this is the threshold below which no timer arming will occur.
2076                    The 1.25 coefficient makes the threshold a little bigger
2077                    than the real time, to compensate for timer inaccuracy */
2078                 m_cmng_port->rs_vars.rs_threshold =
2079                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2080
2081                 /* resolution of fairness timer */
2082                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2083                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2084                 t_fair = T_FAIR_COEF / port_rate;
2085
2086                 /* this is the threshold below which we won't arm
2087                    the timer anymore */
2088                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2089
2090                 /* we multiply by 1e3/8 to get bytes/msec.
2091                    We don't want the credit to exceed
2092                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2093                 m_cmng_port->fair_vars.upper_bound =
2094                                                 r_param * t_fair * FAIR_MEM;
2095                 /* since each tick is 4 usec */
2096                 m_cmng_port->fair_vars.fairness_timeout =
2097                                                 fair_periodic_timeout_usec / 4;
2098
2099         } else {
2100                 /* Disable rate shaping and fairness */
2101                 m_cmng_port->flags.cmng_vn_enable = 0;
2102                 m_cmng_port->flags.fairness_enable = 0;
2103                 m_cmng_port->flags.rate_shaping_enable = 0;
2104
2105                 DP(NETIF_MSG_IFUP,
2106                    "Single function mode  minmax will be disabled\n");
2107         }
2108
2109         /* Store it to internal memory */
2110         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2111                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2112                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2113                        ((u32 *)(m_cmng_port))[i]);
2114 }
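/* Worked example for the 10G case (port_rate = 10000 Mbps):
 * r_param = 10000/8 = 1250 bytes/usec, so rs_threshold is
 * 1.25 * RS_PERIODIC_TIMEOUT_USEC * 1250 bytes.  The comment above
 * ("for 10G it is 1000usec") implies t_fair = T_FAIR_COEF/10000 =
 * 1000 usec, making upper_bound 1250 * 1000 * FAIR_MEM bytes of
 * credit.  The trailing divisions by 4 convert usec into 4-usec SDM
 * ticks. */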
2115
2116 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2117                                    u32 wsum, u16 port_rate,
2118                                  struct cmng_struct_per_port *m_cmng_port)
2119 {
2120         struct rate_shaping_vars_per_vn m_rs_vn;
2121         struct fairness_vars_per_vn m_fair_vn;
2122         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2123         u16 vn_min_rate, vn_max_rate;
2124         int i;
2125
2126         /* If function is hidden - set min and max to zeroes */
2127         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2128                 vn_min_rate = 0;
2129                 vn_max_rate = 0;
2130
2131         } else {
2132                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2133                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2134                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2135                    if current min rate is zero - set it to 1.
2136                    This is a requirement of the algorithm. */
2137                 if ((vn_min_rate == 0) && wsum)
2138                         vn_min_rate = DEF_MIN_RATE;
2139                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2140                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2141         }
2142
2143         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2144            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2145
2146         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2147         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2148
2149         /* global vn counter - maximal Mbps for this vn */
2150         m_rs_vn.vn_counter.rate = vn_max_rate;
2151
2152         /* quota - number of bytes transmitted in this period */
2153         m_rs_vn.vn_counter.quota =
2154                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2155
2156 #ifdef BNX2X_PER_PROT_QOS
2157         /* per protocol counter */
2158         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2159                 /* maximal Mbps for this protocol */
2160                 m_rs_vn.protocol_counters[protocol].rate =
2161                                                 protocol_max_rate[protocol];
2162                 /* the quota in each timer period -
2163                    number of bytes transmitted in this period */
2164                 m_rs_vn.protocol_counters[protocol].quota =
2165                         (u32)(rs_periodic_timeout_usec *
2166                           ((double)m_rs_vn.
2167                                    protocol_counters[protocol].rate/8));
2168         }
2169 #endif
2170
2171         if (wsum) {
2172                 /* credit for each period of the fairness algorithm:
2173                    number of bytes in T_FAIR (the vns share the port rate).
2174                    wsum should not be larger than 10000, thus
2175                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2176                 m_fair_vn.vn_credit_delta =
2177                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2178                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2179                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2180                    m_fair_vn.vn_credit_delta);
2181         }
2182
2183 #ifdef BNX2X_PER_PROT_QOS
2184         do {
2185                 u32 protocolWeightSum = 0;
2186
2187                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2188                         protocolWeightSum +=
2189                                         drvInit.protocol_min_rate[protocol];
2190                 /* per protocol counter -
2191                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2192                 if (protocolWeightSum > 0) {
2193                         for (protocol = 0;
2194                              protocol < NUM_OF_PROTOCOLS; protocol++)
2195                                 /* credit for each period of the
2196                                    fairness algorithm - number of bytes in
2197                                    T_FAIR (the protocols share the vn rate) */
2198                                 m_fair_vn.protocol_credit_delta[protocol] =
2199                                         (u32)((vn_min_rate / 8) * t_fair *
2200                                         protocol_min_rate / protocolWeightSum);
2201                 }
2202         } while (0);
2203 #endif
2204
2205         /* Store it to internal memory */
2206         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2207                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2208                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2209                        ((u32 *)(&m_rs_vn))[i]);
2210
2211         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2212                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2213                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2214                        ((u32 *)(&m_fair_vn))[i]);
2215 }
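/* Worked example (illustrative): a vn whose max BW works out to
 * vn_max_rate = 3000 Mbps gets quota = 3000 * RS_PERIODIC_TIMEOUT_USEC
 * / 8 bytes per rate-shaping period (Mbps divided by 8 is bytes/usec,
 * times the period length in usec).  With fairness active, its
 * per-period credit is vn_min_rate scaled by T_FAIR_COEF / (8 * wsum),
 * but never below 2 * fair_threshold. */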
2216
2217 /* This function is called upon link interrupt */
2218 static void bnx2x_link_attn(struct bnx2x *bp)
2219 {
2220         int vn;
2221
2222         /* Make sure that we are synced with the current statistics */
2223         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2224
2225         bnx2x_acquire_phy_lock(bp);
2226         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2227         bnx2x_release_phy_lock(bp);
2228
2229         if (bp->link_vars.link_up) {
2230
2231                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232                         struct host_port_stats *pstats;
2233
2234                         pstats = bnx2x_sp(bp, port_stats);
2235                         /* reset old bmac stats */
2236                         memset(&(pstats->mac_stx[0]), 0,
2237                                sizeof(struct mac_stx));
2238                 }
2239                 if ((bp->state == BNX2X_STATE_OPEN) ||
2240                     (bp->state == BNX2X_STATE_DISABLED))
2241                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2242         }
2243
2244         /* indicate link status */
2245         bnx2x_link_report(bp);
2246
2247         if (IS_E1HMF(bp)) {
2248                 int func;
2249
2250                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251                         if (vn == BP_E1HVN(bp))
2252                                 continue;
2253
2254                         func = ((vn << 1) | BP_PORT(bp));
2255
2256                         /* Set the attention towards other drivers
2257                            on the same port */
2258                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2260                 }
2261         }
2262
2263         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264                 struct cmng_struct_per_port m_cmng_port;
2265                 u32 wsum;
2266                 int port = BP_PORT(bp);
2267
2268                 /* Init RATE SHAPING and FAIRNESS contexts */
2269                 wsum = bnx2x_calc_vn_wsum(bp);
2270                 bnx2x_init_port_minmax(bp, (int)wsum,
2271                                         bp->link_vars.line_speed,
2272                                         &m_cmng_port);
2273                 if (IS_E1HMF(bp))
2274                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276                                         wsum, bp->link_vars.line_speed,
2277                                                      &m_cmng_port);
2278         }
2279 }
2280
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2282 {
2283         if (bp->state != BNX2X_STATE_OPEN)
2284                 return;
2285
2286         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2287
2288         if (bp->link_vars.link_up)
2289                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290         else
2291                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2292
2293         /* indicate link status */
2294         bnx2x_link_report(bp);
2295 }
2296
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2298 {
2299         int port = BP_PORT(bp);
2300         u32 val;
2301
2302         bp->port.pmf = 1;
2303         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2304
2305         /* enable nig attention */
2306         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2309
2310         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2311 }
2312
2313 /* end of Link */
2314
2315 /* slow path */
2316
2317 /*
2318  * General service functions
2319  */
2320
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323                          u32 data_hi, u32 data_lo, int common)
2324 {
2325         int func = BP_FUNC(bp);
2326
2327         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2329            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2332
2333 #ifdef BNX2X_STOP_ON_ERROR
2334         if (unlikely(bp->panic))
2335                 return -EIO;
2336 #endif
2337
2338         spin_lock_bh(&bp->spq_lock);
2339
2340         if (!bp->spq_left) {
2341                 BNX2X_ERR("BUG! SPQ ring full!\n");
2342                 spin_unlock_bh(&bp->spq_lock);
2343                 bnx2x_panic();
2344                 return -EBUSY;
2345         }
2346
2347         /* CID needs the port number to be encoded in it */
2348         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2350                                      HW_CID(bp, cid)));
2351         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2352         if (common)
2353                 bp->spq_prod_bd->hdr.type |=
2354                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2355
2356         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2358
2359         bp->spq_left--;
2360
2361         if (bp->spq_prod_bd == bp->spq_last_bd) {
2362                 bp->spq_prod_bd = bp->spq;
2363                 bp->spq_prod_idx = 0;
2364                 DP(NETIF_MSG_TIMER, "end of spq\n");
2365
2366         } else {
2367                 bp->spq_prod_bd++;
2368                 bp->spq_prod_idx++;
2369         }
2370
2371         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2372                bp->spq_prod_idx);
2373
2374         spin_unlock_bh(&bp->spq_lock);
2375         return 0;
2376 }
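/* Callers pass a ramrod command id plus a 64-bit payload split into
 * hi/lo halves; 'common' marks ramrods addressed to the whole device
 * rather than a single connection.  The shape of a typical call
 * (the command id here is only illustrative of the pattern -- see the
 * real ramrod call sites elsewhere in this file):
 */
#if 0
        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0,
                      U64_HI(addr), U64_LO(addr), 0);
#endif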
2377
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2380 {
2381         u32 i, j, val;
2382         int rc = 0;
2383
2384         might_sleep();
2385         i = 100;
2386         for (j = 0; j < i*10; j++) {
2387                 val = (1UL << 31);
2388                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390                 if (val & (1L << 31))
2391                         break;
2392
2393                 msleep(5);
2394         }
2395         if (!(val & (1L << 31))) {
2396                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2397                 rc = -EBUSY;
2398         }
2399
2400         return rc;
2401 }
2402
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2405 {
2406         u32 val = 0;
2407
2408         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 }
2410
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2412 {
2413         struct host_def_status_block *def_sb = bp->def_status_blk;
2414         u16 rc = 0;
2415
2416         barrier(); /* status block is written to by the chip */
2417         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2419                 rc |= 1;
2420         }
2421         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2423                 rc |= 2;
2424         }
2425         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2427                 rc |= 4;
2428         }
2429         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2431                 rc |= 8;
2432         }
2433         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2435                 rc |= 16;
2436         }
2437         return rc;
2438 }
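/* The return value is a bitmask of which default status-block indices
 * moved: 1 = attention bits, 2 = CStorm, 4 = UStorm, 8 = XStorm,
 * 16 = TStorm.  bnx2x_sp_task() below uses it to service only the
 * parts that actually changed. */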
2439
2440 /*
2441  * slow path service functions
2442  */
2443
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2445 {
2446         int port = BP_PORT(bp);
2447         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2448                        COMMAND_REG_ATTN_BITS_SET);
2449         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452                                        NIG_REG_MASK_INTERRUPT_PORT0;
2453         u32 aeu_mask;
2454
2455         if (bp->attn_state & asserted)
2456                 BNX2X_ERR("IGU ERROR\n");
2457
2458         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459         aeu_mask = REG_RD(bp, aeu_addr);
2460
2461         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2462            aeu_mask, asserted);
2463         aeu_mask &= ~(asserted & 0xff);
2464         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2465
2466         REG_WR(bp, aeu_addr, aeu_mask);
2467         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2468
2469         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2470         bp->attn_state |= asserted;
2471         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2472
2473         if (asserted & ATTN_HARD_WIRED_MASK) {
2474                 if (asserted & ATTN_NIG_FOR_FUNC) {
2475
2476                         /* save nig interrupt mask */
2477                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2478                         REG_WR(bp, nig_int_mask_addr, 0);
2479
2480                         bnx2x_link_attn(bp);
2481
2482                         /* handle unicore attn? */
2483                 }
2484                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2485                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2486
2487                 if (asserted & GPIO_2_FUNC)
2488                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2489
2490                 if (asserted & GPIO_3_FUNC)
2491                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2492
2493                 if (asserted & GPIO_4_FUNC)
2494                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2495
2496                 if (port == 0) {
2497                         if (asserted & ATTN_GENERAL_ATTN_1) {
2498                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2499                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2500                         }
2501                         if (asserted & ATTN_GENERAL_ATTN_2) {
2502                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2503                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2504                         }
2505                         if (asserted & ATTN_GENERAL_ATTN_3) {
2506                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2507                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2508                         }
2509                 } else {
2510                         if (asserted & ATTN_GENERAL_ATTN_4) {
2511                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2512                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2513                         }
2514                         if (asserted & ATTN_GENERAL_ATTN_5) {
2515                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2516                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2517                         }
2518                         if (asserted & ATTN_GENERAL_ATTN_6) {
2519                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2520                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2521                         }
2522                 }
2523
2524         } /* if hardwired */
2525
2526         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2527            asserted, hc_addr);
2528         REG_WR(bp, hc_addr, asserted);
2529
2530         /* now set back the mask */
2531         if (asserted & ATTN_NIG_FOR_FUNC)
2532                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2533 }
2534
2535 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2536 {
2537         int port = BP_PORT(bp);
2538         int reg_offset;
2539         u32 val;
2540
2541         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2543
2544         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2545
2546                 val = REG_RD(bp, reg_offset);
2547                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548                 REG_WR(bp, reg_offset, val);
2549
2550                 BNX2X_ERR("SPIO5 hw attention\n");
2551
2552                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2554                         /* Fan failure attention */
2555
2556                         /* The PHY reset is controlled by GPIO 1 */
2557                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2558                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2559                         /* Low power mode is controlled by GPIO 2 */
2560                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2561                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2562                         /* mark the failure */
2563                         bp->link_params.ext_phy_config &=
2564                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2565                         bp->link_params.ext_phy_config |=
2566                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2567                         SHMEM_WR(bp,
2568                                  dev_info.port_hw_config[port].
2569                                                         external_phy_config,
2570                                  bp->link_params.ext_phy_config);
2571                         /* log the failure */
2572                         printk(KERN_ERR PFX "Fan Failure on Network"
2573                                " Controller %s has caused the driver to"
2574                                " shutdown the card to prevent permanent"
2575                                " damage.  Please contact Dell Support for"
2576                                " assistance\n", bp->dev->name);
2577                         break;
2578
2579                 default:
2580                         break;
2581                 }
2582         }
2583
2584         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2585
2586                 val = REG_RD(bp, reg_offset);
2587                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2588                 REG_WR(bp, reg_offset, val);
2589
2590                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2591                           (attn & HW_INTERRUT_ASSERT_SET_0));
2592                 bnx2x_panic();
2593         }
2594 }
2595
2596 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2597 {
2598         u32 val;
2599
2600         if (attn & BNX2X_DOORQ_ASSERT) {
2601
2602                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2603                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2604                 /* DORQ discard attention */
2605                 if (val & 0x2)
2606                         BNX2X_ERR("FATAL error from DORQ\n");
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2610
2611                 int port = BP_PORT(bp);
2612                 int reg_offset;
2613
2614                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2615                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2616
2617                 val = REG_RD(bp, reg_offset);
2618                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2619                 REG_WR(bp, reg_offset, val);
2620
2621                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2622                           (attn & HW_INTERRUT_ASSERT_SET_1));
2623                 bnx2x_panic();
2624         }
2625 }
2626
2627 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2628 {
2629         u32 val;
2630
2631         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2632
2633                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2634                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2635                 /* CFC error attention */
2636                 if (val & 0x2)
2637                         BNX2X_ERR("FATAL error from CFC\n");
2638         }
2639
2640         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2641
2642                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2643                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2644                 /* RQ_USDMDP_FIFO_OVERFLOW */
2645                 if (val & 0x18000)
2646                         BNX2X_ERR("FATAL error from PXP\n");
2647         }
2648
2649         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2650
2651                 int port = BP_PORT(bp);
2652                 int reg_offset;
2653
2654                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2655                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2656
2657                 val = REG_RD(bp, reg_offset);
2658                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2659                 REG_WR(bp, reg_offset, val);
2660
2661                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2662                           (attn & HW_INTERRUT_ASSERT_SET_2));
2663                 bnx2x_panic();
2664         }
2665 }
2666
2667 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2668 {
2669         u32 val;
2670
2671         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2672
2673                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2674                         int func = BP_FUNC(bp);
2675
2676                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2677                         bnx2x__link_status_update(bp);
2678                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2679                                                         DRV_STATUS_PMF)
2680                                 bnx2x_pmf_update(bp);
2681
2682                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2683
2684                         BNX2X_ERR("MC assert!\n");
2685                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2688                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2689                         bnx2x_panic();
2690
2691                 } else if (attn & BNX2X_MCP_ASSERT) {
2692
2693                         BNX2X_ERR("MCP assert!\n");
2694                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2695                         bnx2x_fw_dump(bp);
2696
2697                 } else
2698                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2699         }
2700
2701         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2702                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2703                 if (attn & BNX2X_GRC_TIMEOUT) {
2704                         val = CHIP_IS_E1H(bp) ?
2705                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2706                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2707                 }
2708                 if (attn & BNX2X_GRC_RSV) {
2709                         val = CHIP_IS_E1H(bp) ?
2710                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2711                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2712                 }
2713                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2714         }
2715 }
2716
2717 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2718 {
2719         struct attn_route attn;
2720         struct attn_route group_mask;
2721         int port = BP_PORT(bp);
2722         int index;
2723         u32 reg_addr;
2724         u32 val;
2725         u32 aeu_mask;
2726
2727         /* need to take HW lock because MCP or other port might also
2728            try to handle this event */
2729         bnx2x_acquire_alr(bp);
2730
2731         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2732         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2733         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2734         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2735         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2736            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2737
2738         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2739                 if (deasserted & (1 << index)) {
2740                         group_mask = bp->attn_group[index];
2741
2742                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2743                            index, group_mask.sig[0], group_mask.sig[1],
2744                            group_mask.sig[2], group_mask.sig[3]);
2745
2746                         bnx2x_attn_int_deasserted3(bp,
2747                                         attn.sig[3] & group_mask.sig[3]);
2748                         bnx2x_attn_int_deasserted1(bp,
2749                                         attn.sig[1] & group_mask.sig[1]);
2750                         bnx2x_attn_int_deasserted2(bp,
2751                                         attn.sig[2] & group_mask.sig[2]);
2752                         bnx2x_attn_int_deasserted0(bp,
2753                                         attn.sig[0] & group_mask.sig[0]);
2754
2755                         if ((attn.sig[0] & group_mask.sig[0] &
2756                                                 HW_PRTY_ASSERT_SET_0) ||
2757                             (attn.sig[1] & group_mask.sig[1] &
2758                                                 HW_PRTY_ASSERT_SET_1) ||
2759                             (attn.sig[2] & group_mask.sig[2] &
2760                                                 HW_PRTY_ASSERT_SET_2))
2761                                 BNX2X_ERR("FATAL HW block parity attention\n");
2762                 }
2763         }
2764
2765         bnx2x_release_alr(bp);
2766
2767         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2768
2769         val = ~deasserted;
2770         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2771            val, reg_addr);
2772         REG_WR(bp, reg_addr, val);
2773
2774         if (~bp->attn_state & deasserted)
2775                 BNX2X_ERR("IGU ERROR\n");
2776
2777         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2778                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2779
2780         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2781         aeu_mask = REG_RD(bp, reg_addr);
2782
2783         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2784            aeu_mask, deasserted);
2785         aeu_mask |= (deasserted & 0xff);
2786         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2787
2788         REG_WR(bp, reg_addr, aeu_mask);
2789         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2790
2791         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2792         bp->attn_state &= ~deasserted;
2793         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2794 }
2795
2796 static void bnx2x_attn_int(struct bnx2x *bp)
2797 {
2798         /* read local copy of bits */
2799         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2800         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2801         u32 attn_state = bp->attn_state;
2802
2803         /* look for changed bits */
2804         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2805         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2806
2807         DP(NETIF_MSG_HW,
2808            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2809            attn_bits, attn_ack, asserted, deasserted);
2810
2811         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2812                 BNX2X_ERR("BAD attention state\n");
2813
2814         /* handle bits that were raised */
2815         if (asserted)
2816                 bnx2x_attn_int_asserted(bp, asserted);
2817
2818         if (deasserted)
2819                 bnx2x_attn_int_deasserted(bp, deasserted);
2820 }
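/* Illustrative sketch, not driver code: how the masks above behave for a
 * single attention line.  With attn_bits = 1 (line reported high),
 * attn_ack = 0 (not yet acknowledged) and attn_state = 0 (last seen low):
 *   asserted   =  1 & ~0 & ~0 = 1,   deasserted = ~1 & 0 & 0 = 0,
 * so the line is handled as newly raised.  After it is acked and recorded
 * (attn_ack = attn_state = 1), a later read of attn_bits = 0 yields
 *   deasserted = ~0 & 1 & 1 = 1.
 * The "BAD attention state" test fires for a bit where attn_bits and
 * attn_ack agree (nothing in flight) while attn_bits and attn_state
 * disagree, i.e. a transition was missed.
 */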
2821
2822 static void bnx2x_sp_task(struct work_struct *work)
2823 {
2824         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2825         u16 status;
2826
2827
2828         /* Return here if interrupt is disabled */
2829         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2830                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2831                 return;
2832         }
2833
2834         status = bnx2x_update_dsb_idx(bp);
2835 /*      if (status == 0)                                     */
2836 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2837
2838         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2839
2840         /* HW attentions */
2841         if (status & 0x1)
2842                 bnx2x_attn_int(bp);
2843
2844         /* CStorm events: query_stats, port delete ramrod */
2845         if (status & 0x2)
2846                 bp->stats_pending = 0;
2847
2848         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2849                      IGU_INT_NOP, 1);
2850         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2851                      IGU_INT_NOP, 1);
2852         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2853                      IGU_INT_NOP, 1);
2854         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2855                      IGU_INT_NOP, 1);
2856         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2857                      IGU_INT_ENABLE, 1);
2858
2859 }
2860
2861 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2862 {
2863         struct net_device *dev = dev_instance;
2864         struct bnx2x *bp = netdev_priv(dev);
2865
2866         /* Return here if interrupt is disabled */
2867         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869                 return IRQ_HANDLED;
2870         }
2871
2872         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2873
2874 #ifdef BNX2X_STOP_ON_ERROR
2875         if (unlikely(bp->panic))
2876                 return IRQ_HANDLED;
2877 #endif
2878
2879         schedule_work(&bp->sp_task);
2880
2881         return IRQ_HANDLED;
2882 }
2883
2884 /* end of slow path */
2885
2886 /* Statistics */
2887
2888 /****************************************************************************
2889 * Macros
2890 ****************************************************************************/
2891
2892 /* sum[hi:lo] += add[hi:lo] */
2893 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2894         do { \
2895                 s_lo += a_lo; \
2896                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2897         } while (0)
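/* Illustrative sketch, not driver code: the carry logic of ADD_64 on a
 * split 64-bit counter.  For s = 0x00000001_FFFFFFFF, a = 0x00000000_00000002:
 *   s_lo = 0xFFFFFFFF + 2 = 0x00000001 (wraps), and s_lo < a_lo detects
 *   the wrap, so s_hi = 1 + 0 + 1 = 2,
 * giving 0x00000002_00000001, the correct sum.  The parentheses around the
 * conditional are essential: without them the ternary consumes
 * a_hi + (s_lo < a_lo) as its condition, so s_hi would only ever grow by
 * 0 or 1 and a_hi would be dropped.
 */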
2898
2899 /* difference = minuend - subtrahend */
2900 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2901         do { \
2902                 if (m_lo < s_lo) { \
2903                         /* underflow */ \
2904                         d_hi = m_hi - s_hi; \
2905                         if (d_hi > 0) { \
2906                                 /* we can borrow 1 */ \
2907                                 d_hi--; \
2908                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2909                         } else { \
2910                                 /* m_hi <= s_hi */ \
2911                                 d_hi = 0; \
2912                                 d_lo = 0; \
2913                         } \
2914                 } else { \
2915                         /* m_lo >= s_lo */ \
2916                         if (m_hi < s_hi) { \
2917                                 d_hi = 0; \
2918                                 d_lo = 0; \
2919                         } else { \
2920                                 /* m_hi >= s_hi */ \
2921                                 d_hi = m_hi - s_hi; \
2922                                 d_lo = m_lo - s_lo; \
2923                         } \
2924                 } \
2925         } while (0)
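/* Illustrative sketch, not driver code: DIFF_64 borrow handling.  For
 * m = 0x00000002_00000001 and s = 0x00000001_00000002 (so m_lo < s_lo):
 *   d_hi = 2 - 1 = 1 > 0, so borrow one from the high half:
 *   d_hi = 0,  d_lo = 1 + (0xFFFFFFFF - 2) + 1 = 0xFFFFFFFF,
 * i.e. d = 0x00000000_FFFFFFFF, the true 64-bit difference.  When the
 * subtrahend exceeds the minuend the macro clamps the result to 0 instead
 * of wrapping, which suits monotonic counters.
 */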
2926
2927 #define UPDATE_STAT64(s, t) \
2928         do { \
2929                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2930                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2931                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2932                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2933                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2934                        pstats->mac_stx[1].t##_lo, diff.lo); \
2935         } while (0)
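/* Illustrative note, inferred from the macro above: mac_stx[0] acts as the
 * last raw hardware snapshot and mac_stx[1] as the running 64-bit total, so
 * the total survives MAC counter resets.  E.g. a fresh read of 150 against
 * a stored snapshot of 100 produces diff = 50; the snapshot becomes 150 and
 * the 50 is folded into mac_stx[1] via ADD_64.
 */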
2936
2937 #define UPDATE_STAT64_NIG(s, t) \
2938         do { \
2939                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2940                         diff.lo, new->s##_lo, old->s##_lo); \
2941                 ADD_64(estats->t##_hi, diff.hi, \
2942                        estats->t##_lo, diff.lo); \
2943         } while (0)
2944
2945 /* sum[hi:lo] += add */
2946 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2947         do { \
2948                 s_lo += a; \
2949                 s_hi += (s_lo < a) ? 1 : 0; \
2950         } while (0)
2951
2952 #define UPDATE_EXTEND_STAT(s) \
2953         do { \
2954                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2955                               pstats->mac_stx[1].s##_lo, \
2956                               new->s); \
2957         } while (0)
2958
2959 #define UPDATE_EXTEND_TSTAT(s, t) \
2960         do { \
2961                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2962                 old_tclient->s = le32_to_cpu(tclient->s); \
2963                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2964         } while (0)
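/* Illustrative sketch, not driver code: the u32 subtraction above is
 * wrap-safe because unsigned arithmetic is modulo 2^32.  If the tstorm
 * counter wrapped from 0xFFFFFFF0 to 0x00000010:
 *   diff = 0x00000010 - 0xFFFFFFF0 = 0x20 (32 events),
 * which ADD_EXTEND_64 then widens into the 64-bit accumulator.
 */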
2965
2966 #define UPDATE_EXTEND_XSTAT(s, t) \
2967         do { \
2968                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2969                 old_xclient->s = le32_to_cpu(xclient->s); \
2970                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2971         } while (0)
2972
2973 /*
2974  * General service functions
2975  */
2976
2977 static inline long bnx2x_hilo(u32 *hiref)
2978 {
2979         u32 lo = *(hiref + 1);
2980 #if (BITS_PER_LONG == 64)
2981         u32 hi = *hiref;
2982
2983         return HILO_U64(hi, lo);
2984 #else
2985         return lo;
2986 #endif
2987 }
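/* Illustrative note: on a 64-bit kernel bnx2x_hilo() returns the full
 * HILO_U64(hi, lo) value (presumably (((u64)hi << 32) + lo), per bnx2x.h);
 * on a 32-bit kernel a long can only carry the low word, so the high word
 * is deliberately dropped to match the unsigned long fields of
 * struct net_device_stats.
 */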
2988
2989 /*
2990  * Init service functions
2991  */
2992
2993 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2994 {
2995         if (!bp->stats_pending) {
2996                 struct eth_query_ramrod_data ramrod_data = {0};
2997                 int rc;
2998
2999                 ramrod_data.drv_counter = bp->stats_counter++;
3000                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3001                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3002
3003                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3004                                    ((u32 *)&ramrod_data)[1],
3005                                    ((u32 *)&ramrod_data)[0], 0);
3006                 if (rc == 0) {
3007                         /* stats ramrod has its own slot on the spq */
3008                         bp->spq_left++;
3009                         bp->stats_pending = 1;
3010                 }
3011         }
3012 }
3013
3014 static void bnx2x_stats_init(struct bnx2x *bp)
3015 {
3016         int port = BP_PORT(bp);
3017
3018         bp->executer_idx = 0;
3019         bp->stats_counter = 0;
3020
3021         /* port stats */
3022         if (!BP_NOMCP(bp))
3023                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3024         else
3025                 bp->port.port_stx = 0;
3026         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3027
3028         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3029         bp->port.old_nig_stats.brb_discard =
3030                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3031         bp->port.old_nig_stats.brb_truncate =
3032                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3033         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3034                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3035         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3036                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3037
3038         /* function stats */
3039         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3040         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3041         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3042         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3043
3044         bp->stats_state = STATS_STATE_DISABLED;
3045         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3046                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3047 }
3048
3049 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3050 {
3051         struct dmae_command *dmae = &bp->stats_dmae;
3052         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3053
3054         *stats_comp = DMAE_COMP_VAL;
3055
3056         /* loader */
3057         if (bp->executer_idx) {
3058                 int loader_idx = PMF_DMAE_C(bp);
3059
3060                 memset(dmae, 0, sizeof(struct dmae_command));
3061
3062                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3063                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3064                                 DMAE_CMD_DST_RESET |
3065 #ifdef __BIG_ENDIAN
3066                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3067 #else
3068                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3069 #endif
3070                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3071                                                DMAE_CMD_PORT_0) |
3072                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3073                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3074                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3075                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3076                                      sizeof(struct dmae_command) *
3077                                      (loader_idx + 1)) >> 2;
3078                 dmae->dst_addr_hi = 0;
3079                 dmae->len = sizeof(struct dmae_command) >> 2;
3080                 if (CHIP_IS_E1(bp))
3081                         dmae->len--;
3082                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3083                 dmae->comp_addr_hi = 0;
3084                 dmae->comp_val = 1;
3085
3086                 *stats_comp = 0;
3087                 bnx2x_post_dmae(bp, dmae, loader_idx);
3088
3089         } else if (bp->func_stx) {
3090                 *stats_comp = 0;
3091                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3092         }
3093 }
3094
3095 static int bnx2x_stats_comp(struct bnx2x *bp)
3096 {
3097         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3098         int cnt = 10;
3099
3100         might_sleep();
3101         while (*stats_comp != DMAE_COMP_VAL) {
3102                 if (!cnt) {
3103                         BNX2X_ERR("timed out waiting for stats to finish\n");
3104                         break;
3105                 }
3106                 cnt--;
3107                 msleep(1);
3108         }
3109         return 1;
3110 }
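/* Illustrative note: the loop above is a completion barrier that polls up
 * to ~10 ms (10 x msleep(1)) for the DMAE engine to write DMAE_COMP_VAL.
 * It returns 1 even after a timeout, so callers use it purely as a
 * synchronization point rather than as a success check.
 */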
3111
3112 /*
3113  * Statistics service functions
3114  */
3115
3116 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3117 {
3118         struct dmae_command *dmae;
3119         u32 opcode;
3120         int loader_idx = PMF_DMAE_C(bp);
3121         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122
3123         /* sanity */
3124         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3125                 BNX2X_ERR("BUG!\n");
3126                 return;
3127         }
3128
3129         bp->executer_idx = 0;
3130
3131         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3132                   DMAE_CMD_C_ENABLE |
3133                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3134 #ifdef __BIG_ENDIAN
3135                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3136 #else
3137                   DMAE_CMD_ENDIANITY_DW_SWAP |
3138 #endif
3139                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3140                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3141
3142         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3143         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3144         dmae->src_addr_lo = bp->port.port_stx >> 2;
3145         dmae->src_addr_hi = 0;
3146         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3147         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3148         dmae->len = DMAE_LEN32_RD_MAX;
3149         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3150         dmae->comp_addr_hi = 0;
3151         dmae->comp_val = 1;
3152
3153         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3154         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3155         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3156         dmae->src_addr_hi = 0;
3157         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3158                                    DMAE_LEN32_RD_MAX * 4);
3159         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3160                                    DMAE_LEN32_RD_MAX * 4);
3161         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3162         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3163         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3164         dmae->comp_val = DMAE_COMP_VAL;
3165
3166         *stats_comp = 0;
3167         bnx2x_hw_stats_post(bp);
3168         bnx2x_stats_comp(bp);
3169 }
3170
3171 static void bnx2x_port_stats_init(struct bnx2x *bp)
3172 {
3173         struct dmae_command *dmae;
3174         int port = BP_PORT(bp);
3175         int vn = BP_E1HVN(bp);
3176         u32 opcode;
3177         int loader_idx = PMF_DMAE_C(bp);
3178         u32 mac_addr;
3179         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3180
3181         /* sanity */
3182         if (!bp->link_vars.link_up || !bp->port.pmf) {
3183                 BNX2X_ERR("BUG!\n");
3184                 return;
3185         }
3186
3187         bp->executer_idx = 0;
3188
3189         /* MCP */
3190         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3191                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3192                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3193 #ifdef __BIG_ENDIAN
3194                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3195 #else
3196                   DMAE_CMD_ENDIANITY_DW_SWAP |
3197 #endif
3198                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3199                   (vn << DMAE_CMD_E1HVN_SHIFT));
3200
3201         if (bp->port.port_stx) {
3202
3203                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3204                 dmae->opcode = opcode;
3205                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3206                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3207                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3208                 dmae->dst_addr_hi = 0;
3209                 dmae->len = sizeof(struct host_port_stats) >> 2;
3210                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3211                 dmae->comp_addr_hi = 0;
3212                 dmae->comp_val = 1;
3213         }
3214
3215         if (bp->func_stx) {
3216
3217                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3218                 dmae->opcode = opcode;
3219                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3220                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3221                 dmae->dst_addr_lo = bp->func_stx >> 2;
3222                 dmae->dst_addr_hi = 0;
3223                 dmae->len = sizeof(struct host_func_stats) >> 2;
3224                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3225                 dmae->comp_addr_hi = 0;
3226                 dmae->comp_val = 1;
3227         }
3228
3229         /* MAC */
3230         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3231                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3232                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3233 #ifdef __BIG_ENDIAN
3234                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3235 #else
3236                   DMAE_CMD_ENDIANITY_DW_SWAP |
3237 #endif
3238                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3239                   (vn << DMAE_CMD_E1HVN_SHIFT));
3240
3241         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3242
3243                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3244                                    NIG_REG_INGRESS_BMAC0_MEM);
3245
3246                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3247                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3248                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249                 dmae->opcode = opcode;
3250                 dmae->src_addr_lo = (mac_addr +
3251                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3252                 dmae->src_addr_hi = 0;
3253                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3254                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3255                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3256                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3257                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3258                 dmae->comp_addr_hi = 0;
3259                 dmae->comp_val = 1;
3260
3261                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3262                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3263                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264                 dmae->opcode = opcode;
3265                 dmae->src_addr_lo = (mac_addr +
3266                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267                 dmae->src_addr_hi = 0;
3268                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3269                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3270                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3271                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3272                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3273                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3274                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3275                 dmae->comp_addr_hi = 0;
3276                 dmae->comp_val = 1;
3277
3278         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3279
3280                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3281
3282                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3283                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3284                 dmae->opcode = opcode;
3285                 dmae->src_addr_lo = (mac_addr +
3286                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3287                 dmae->src_addr_hi = 0;
3288                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3289                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3290                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3291                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292                 dmae->comp_addr_hi = 0;
3293                 dmae->comp_val = 1;
3294
3295                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3296                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297                 dmae->opcode = opcode;
3298                 dmae->src_addr_lo = (mac_addr +
3299                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3300                 dmae->src_addr_hi = 0;
3301                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3302                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3303                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3304                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3305                 dmae->len = 1;
3306                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307                 dmae->comp_addr_hi = 0;
3308                 dmae->comp_val = 1;
3309
3310                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3311                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3312                 dmae->opcode = opcode;
3313                 dmae->src_addr_lo = (mac_addr +
3314                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3315                 dmae->src_addr_hi = 0;
3316                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3317                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3318                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3319                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3320                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3321                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322                 dmae->comp_addr_hi = 0;
3323                 dmae->comp_val = 1;
3324         }
3325
3326         /* NIG */
3327         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3328         dmae->opcode = opcode;
3329         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3330                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3331         dmae->src_addr_hi = 0;
3332         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3333         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3334         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3335         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336         dmae->comp_addr_hi = 0;
3337         dmae->comp_val = 1;
3338
3339         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340         dmae->opcode = opcode;
3341         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3342                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3343         dmae->src_addr_hi = 0;
3344         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3345                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3346         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3347                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3348         dmae->len = (2*sizeof(u32)) >> 2;
3349         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3350         dmae->comp_addr_hi = 0;
3351         dmae->comp_val = 1;
3352
3353         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3355                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3356                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3357 #ifdef __BIG_ENDIAN
3358                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3359 #else
3360                         DMAE_CMD_ENDIANITY_DW_SWAP |
3361 #endif
3362                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3363                         (vn << DMAE_CMD_E1HVN_SHIFT));
3364         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3365                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3366         dmae->src_addr_hi = 0;
3367         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3368                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3369         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3370                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3371         dmae->len = (2*sizeof(u32)) >> 2;
3372         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3373         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3374         dmae->comp_val = DMAE_COMP_VAL;
3375
3376         *stats_comp = 0;
3377 }
3378
3379 static void bnx2x_func_stats_init(struct bnx2x *bp)
3380 {
3381         struct dmae_command *dmae = &bp->stats_dmae;
3382         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3383
3384         /* sanity */
3385         if (!bp->func_stx) {
3386                 BNX2X_ERR("BUG!\n");
3387                 return;
3388         }
3389
3390         bp->executer_idx = 0;
3391         memset(dmae, 0, sizeof(struct dmae_command));
3392
3393         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3394                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3395                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3396 #ifdef __BIG_ENDIAN
3397                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3398 #else
3399                         DMAE_CMD_ENDIANITY_DW_SWAP |
3400 #endif
3401                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3402                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3403         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3404         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3405         dmae->dst_addr_lo = bp->func_stx >> 2;
3406         dmae->dst_addr_hi = 0;
3407         dmae->len = sizeof(struct host_func_stats) >> 2;
3408         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3409         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3410         dmae->comp_val = DMAE_COMP_VAL;
3411
3412         *stats_comp = 0;
3413 }
3414
3415 static void bnx2x_stats_start(struct bnx2x *bp)
3416 {
3417         if (bp->port.pmf)
3418                 bnx2x_port_stats_init(bp);
3419
3420         else if (bp->func_stx)
3421                 bnx2x_func_stats_init(bp);
3422
3423         bnx2x_hw_stats_post(bp);
3424         bnx2x_storm_stats_post(bp);
3425 }
3426
3427 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3428 {
3429         bnx2x_stats_comp(bp);
3430         bnx2x_stats_pmf_update(bp);
3431         bnx2x_stats_start(bp);
3432 }
3433
3434 static void bnx2x_stats_restart(struct bnx2x *bp)
3435 {
3436         bnx2x_stats_comp(bp);
3437         bnx2x_stats_start(bp);
3438 }
3439
3440 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3441 {
3442         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3443         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3444         struct regpair diff;
3445
3446         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3447         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3448         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3449         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3450         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3451         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3452         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3453         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3454         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3455         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3456         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3457         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3458         UPDATE_STAT64(tx_stat_gt127,
3459                                 tx_stat_etherstatspkts65octetsto127octets);
3460         UPDATE_STAT64(tx_stat_gt255,
3461                                 tx_stat_etherstatspkts128octetsto255octets);
3462         UPDATE_STAT64(tx_stat_gt511,
3463                                 tx_stat_etherstatspkts256octetsto511octets);
3464         UPDATE_STAT64(tx_stat_gt1023,
3465                                 tx_stat_etherstatspkts512octetsto1023octets);
3466         UPDATE_STAT64(tx_stat_gt1518,
3467                                 tx_stat_etherstatspkts1024octetsto1522octets);
3468         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3469         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3470         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3471         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3472         UPDATE_STAT64(tx_stat_gterr,
3473                                 tx_stat_dot3statsinternalmactransmiterrors);
3474         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3475 }
3476
3477 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3478 {
3479         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3480         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3481
3482         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3483         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3484         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3486         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3487         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3488         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3489         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3490         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3491         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3492         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3493         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3494         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3495         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3496         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3497         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3498         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3500         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3504         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3505         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3511         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3512         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3513 }
3514
3515 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3516 {
3517         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3518         struct nig_stats *old = &(bp->port.old_nig_stats);
3519         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521         struct regpair diff;
3522
3523         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3524                 bnx2x_bmac_stats_update(bp);
3525
3526         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3527                 bnx2x_emac_stats_update(bp);
3528
3529         else { /* unreached */
3530                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3531                 return -1;
3532         }
3533
3534         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3535                       new->brb_discard - old->brb_discard);
3536         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3537                       new->brb_truncate - old->brb_truncate);
3538
3539         UPDATE_STAT64_NIG(egress_mac_pkt0,
3540                                         etherstatspkts1024octetsto1522octets);
3541         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3542
3543         memcpy(old, new, sizeof(struct nig_stats));
3544
3545         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3546                sizeof(struct mac_stx));
3547         estats->brb_drop_hi = pstats->brb_drop_hi;
3548         estats->brb_drop_lo = pstats->brb_drop_lo;
3549
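        /* Illustrative note, inferred from the field names: _end is bumped
         * and _start set to match before the block is DMAed out, so a
         * consumer that sees start != end knows it read a torn update. */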
3550         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3551
3552         return 0;
3553 }
3554
3555 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3556 {
3557         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3558         int cl_id = BP_CL_ID(bp);
3559         struct tstorm_per_port_stats *tport =
3560                                 &stats->tstorm_common.port_statistics;
3561         struct tstorm_per_client_stats *tclient =
3562                         &stats->tstorm_common.client_statistics[cl_id];
3563         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3564         struct xstorm_per_client_stats *xclient =
3565                         &stats->xstorm_common.client_statistics[cl_id];
3566         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3567         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3568         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3569         u32 diff;
3570
3571         /* are storm stats valid? */
3572         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3573                                                         bp->stats_counter) {
3574                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3575                    "  tstorm counter (%d) != stats_counter (%d)\n",
3576                    tclient->stats_counter, bp->stats_counter);
3577                 return -1;
3578         }
3579         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3580                                                         bp->stats_counter) {
3581                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3582                    "  xstorm counter (%d) != stats_counter (%d)\n",
3583                    xclient->stats_counter, bp->stats_counter);
3584                 return -2;
3585         }
3586
3587         fstats->total_bytes_received_hi =
3588         fstats->valid_bytes_received_hi =
3589                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3590         fstats->total_bytes_received_lo =
3591         fstats->valid_bytes_received_lo =
3592                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3593
3594         estats->error_bytes_received_hi =
3595                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3596         estats->error_bytes_received_lo =
3597                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3598         ADD_64(estats->error_bytes_received_hi,
3599                estats->rx_stat_ifhcinbadoctets_hi,
3600                estats->error_bytes_received_lo,
3601                estats->rx_stat_ifhcinbadoctets_lo);
3602
3603         ADD_64(fstats->total_bytes_received_hi,
3604                estats->error_bytes_received_hi,
3605                fstats->total_bytes_received_lo,
3606                estats->error_bytes_received_lo);
3607
3608         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3609         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3610                                 total_multicast_packets_received);
3611         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3612                                 total_broadcast_packets_received);
3613
3614         fstats->total_bytes_transmitted_hi =
3615                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3616         fstats->total_bytes_transmitted_lo =
3617                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3618
3619         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3620                                 total_unicast_packets_transmitted);
3621         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3622                                 total_multicast_packets_transmitted);
3623         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3624                                 total_broadcast_packets_transmitted);
3625
3626         memcpy(estats, &(fstats->total_bytes_received_hi),
3627                sizeof(struct host_func_stats) - 2*sizeof(u32));
3628
3629         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3630         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3631         estats->brb_truncate_discard =
3632                                 le32_to_cpu(tport->brb_truncate_discard);
3633         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3634
3635         old_tclient->rcv_unicast_bytes.hi =
3636                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3637         old_tclient->rcv_unicast_bytes.lo =
3638                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3639         old_tclient->rcv_broadcast_bytes.hi =
3640                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3641         old_tclient->rcv_broadcast_bytes.lo =
3642                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3643         old_tclient->rcv_multicast_bytes.hi =
3644                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3645         old_tclient->rcv_multicast_bytes.lo =
3646                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3647         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3648
3649         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3650         old_tclient->packets_too_big_discard =
3651                                 le32_to_cpu(tclient->packets_too_big_discard);
3652         estats->no_buff_discard =
3653         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3654         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3655
3656         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3657         old_xclient->unicast_bytes_sent.hi =
3658                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3659         old_xclient->unicast_bytes_sent.lo =
3660                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3661         old_xclient->multicast_bytes_sent.hi =
3662                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3663         old_xclient->multicast_bytes_sent.lo =
3664                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3665         old_xclient->broadcast_bytes_sent.hi =
3666                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3667         old_xclient->broadcast_bytes_sent.lo =
3668                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3669
3670         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3671
3672         return 0;
3673 }
3674
3675 static void bnx2x_net_stats_update(struct bnx2x *bp)
3676 {
3677         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3678         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3679         struct net_device_stats *nstats = &bp->dev->stats;
3680
3681         nstats->rx_packets =
3682                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3683                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3684                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3685
3686         nstats->tx_packets =
3687                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3688                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3689                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3690
3691         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3692
3693         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3694
3695         nstats->rx_dropped = old_tclient->checksum_discard +
3696                              estats->mac_discard;
3697         nstats->tx_dropped = 0;
3698
3699         nstats->multicast =
3700                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3701
3702         nstats->collisions =
3703                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3704                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3705                         estats->tx_stat_dot3statslatecollisions_lo +
3706                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3707
3708         estats->jabber_packets_received =
3709                                 old_tclient->packets_too_big_discard +
3710                                 estats->rx_stat_dot3statsframestoolong_lo;
3711
3712         nstats->rx_length_errors =
3713                                 estats->rx_stat_etherstatsundersizepkts_lo +
3714                                 estats->jabber_packets_received;
3715         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3716         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3717         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3718         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3719         nstats->rx_missed_errors = estats->xxoverflow_discard;
3720
3721         nstats->rx_errors = nstats->rx_length_errors +
3722                             nstats->rx_over_errors +
3723                             nstats->rx_crc_errors +
3724                             nstats->rx_frame_errors +
3725                             nstats->rx_fifo_errors +
3726                             nstats->rx_missed_errors;
3727
3728         nstats->tx_aborted_errors =
3729                         estats->tx_stat_dot3statslatecollisions_lo +
3730                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3731         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3732         nstats->tx_fifo_errors = 0;
3733         nstats->tx_heartbeat_errors = 0;
3734         nstats->tx_window_errors = 0;
3735
3736         nstats->tx_errors = nstats->tx_aborted_errors +
3737                             nstats->tx_carrier_errors;
3738 }
3739
3740 static void bnx2x_stats_update(struct bnx2x *bp)
3741 {
3742         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3743         int update = 0;
3744
3745         if (*stats_comp != DMAE_COMP_VAL)
3746                 return;
3747
3748         if (bp->port.pmf)
3749                 update = (bnx2x_hw_stats_update(bp) == 0);
3750
3751         update |= (bnx2x_storm_stats_update(bp) == 0);
3752
3753         if (update)
3754                 bnx2x_net_stats_update(bp);
3755
3756         else {
3757                 if (bp->stats_pending) {
3758                         bp->stats_pending++;
3759                         if (bp->stats_pending == 3) {
3760                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3761                                 bnx2x_panic();
3762                                 return;
3763                         }
3764                 }
3765         }
3766
3767         if (bp->msglevel & NETIF_MSG_TIMER) {
3768                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3769                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3770                 struct net_device_stats *nstats = &bp->dev->stats;
3771                 int i;
3772
3773                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3774                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3775                                   "  tx pkt (%lx)\n",
3776                        bnx2x_tx_avail(bp->fp),
3777                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3778                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3779                                   "  rx pkt (%lx)\n",
3780                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3781                              bp->fp->rx_comp_cons),
3782                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3783                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3784                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3785                        estats->driver_xoff, estats->brb_drop_lo);
3786                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3787                         "packets_too_big_discard %u  no_buff_discard %u  "
3788                         "mac_discard %u  mac_filter_discard %u  "
3789                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3790                         "ttl0_discard %u\n",
3791                        old_tclient->checksum_discard,
3792                        old_tclient->packets_too_big_discard,
3793                        old_tclient->no_buff_discard, estats->mac_discard,
3794                        estats->mac_filter_discard, estats->xxoverflow_discard,
3795                        estats->brb_truncate_discard,
3796                        old_tclient->ttl0_discard);
3797
3798                 for_each_queue(bp, i) {
3799                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3800                                bnx2x_fp(bp, i, tx_pkt),
3801                                bnx2x_fp(bp, i, rx_pkt),
3802                                bnx2x_fp(bp, i, rx_calls));
3803                 }
3804         }
3805
3806         bnx2x_hw_stats_post(bp);
3807         bnx2x_storm_stats_post(bp);
3808 }
3809
3810 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3811 {
3812         struct dmae_command *dmae;
3813         u32 opcode;
3814         int loader_idx = PMF_DMAE_C(bp);
3815         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3816
3817         bp->executer_idx = 0;
3818
3819         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3820                   DMAE_CMD_C_ENABLE |
3821                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3822 #ifdef __BIG_ENDIAN
3823                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3824 #else
3825                   DMAE_CMD_ENDIANITY_DW_SWAP |
3826 #endif
3827                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3828                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829
3830         if (bp->port.port_stx) {
3831
3832                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3833                 if (bp->func_stx)
3834                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3835                 else
3836                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3837                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3838                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3839                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3840                 dmae->dst_addr_hi = 0;
3841                 dmae->len = sizeof(struct host_port_stats) >> 2;
3842                 if (bp->func_stx) {
3843                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3844                         dmae->comp_addr_hi = 0;
3845                         dmae->comp_val = 1;
3846                 } else {
3847                         dmae->comp_addr_lo =
3848                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3849                         dmae->comp_addr_hi =
3850                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3851                         dmae->comp_val = DMAE_COMP_VAL;
3852
3853                         *stats_comp = 0;
3854                 }
3855         }
3856
3857         if (bp->func_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3861                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3862                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3863                 dmae->dst_addr_lo = bp->func_stx >> 2;
3864                 dmae->dst_addr_hi = 0;
3865                 dmae->len = sizeof(struct host_func_stats) >> 2;
3866                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3867                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3868                 dmae->comp_val = DMAE_COMP_VAL;
3869
3870                 *stats_comp = 0;
3871         }
3872 }
3873
3874 static void bnx2x_stats_stop(struct bnx2x *bp)
3875 {
3876         int update = 0;
3877
3878         bnx2x_stats_comp(bp);
3879
3880         if (bp->port.pmf)
3881                 update = (bnx2x_hw_stats_update(bp) == 0);
3882
3883         update |= (bnx2x_storm_stats_update(bp) == 0);
3884
3885         if (update) {
3886                 bnx2x_net_stats_update(bp);
3887
3888                 if (bp->port.pmf)
3889                         bnx2x_port_stats_stop(bp);
3890
3891                 bnx2x_hw_stats_post(bp);
3892                 bnx2x_stats_comp(bp);
3893         }
3894 }
3895
3896 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3897 {
3898 }
3899
3900 static const struct {
3901         void (*action)(struct bnx2x *bp);
3902         enum bnx2x_stats_state next_state;
3903 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3904 /* state        event   */
3905 {
3906 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3907 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3908 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3909 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3910 },
3911 {
3912 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3913 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3914 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3915 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3916 }
3917 };
3918
3919 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3920 {
3921         enum bnx2x_stats_state state = bp->stats_state;
3922
3923         bnx2x_stats_stm[state][event].action(bp);
3924         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3925
3926         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3927                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3928                    state, event, bp->stats_state);
3929 }
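/* Illustrative sketch, not driver code: one walk through the table above.
 * Starting in STATS_STATE_DISABLED, a LINK_UP event runs
 * bnx2x_stats_start() and moves to STATS_STATE_ENABLED; UPDATE events
 * (posted by bnx2x_timer) then run bnx2x_stats_update() and stay ENABLED;
 * a STOP event runs bnx2x_stats_stop() and drops back to
 * STATS_STATE_DISABLED.
 */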
3930
3931 static void bnx2x_timer(unsigned long data)
3932 {
3933         struct bnx2x *bp = (struct bnx2x *) data;
3934
3935         if (!netif_running(bp->dev))
3936                 return;
3937
3938         if (atomic_read(&bp->intr_sem) != 0)
3939                 goto timer_restart;
3940
3941         if (poll) {
3942                 struct bnx2x_fastpath *fp = &bp->fp[0];
3943                 int rc;
3944
3945                 bnx2x_tx_int(fp, 1000);
3946                 rc = bnx2x_rx_int(fp, 1000);
3947         }
3948
3949         if (!BP_NOMCP(bp)) {
3950                 int func = BP_FUNC(bp);
3951                 u32 drv_pulse;
3952                 u32 mcp_pulse;
3953
3954                 ++bp->fw_drv_pulse_wr_seq;
3955                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3956                 /* TBD - add SYSTEM_TIME */
3957                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3958                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3959
3960                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3961                              MCP_PULSE_SEQ_MASK);
3962                 /* The delta between driver pulse and mcp response
3963                  * should be 1 (before mcp response) or 0 (after mcp response)
3964                  */
3965                 if ((drv_pulse != mcp_pulse) &&
3966                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3967                         /* someone lost a heartbeat... */
3968                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3969                                   drv_pulse, mcp_pulse);
3970                 }
3971         }
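        /* Illustrative sketch, not driver code: heartbeat sequencing.  If
         * the driver writes drv_pulse = 0x005, a healthy MCP shows
         * mcp_pulse = 0x005 (already responded) or 0x004 (response still
         * pending); anything else, e.g. 0x001, means beats were lost and
         * is logged above.  Masking with MCP_PULSE_SEQ_MASK keeps the
         * comparison valid across sequence wrap-around. */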
3972
3973         if ((bp->state == BNX2X_STATE_OPEN) ||
3974             (bp->state == BNX2X_STATE_DISABLED))
3975                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3976
3977 timer_restart:
3978         mod_timer(&bp->timer, jiffies + bp->current_interval);
3979 }
3980
3981 /* end of Statistics */
3982
3983 /* nic init */
3984
3985 /*
3986  * nic init service functions
3987  */
3988
3989 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3990 {
3991         int port = BP_PORT(bp);
3992
3993         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3994                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3995                         sizeof(struct ustorm_status_block)/4);
3996         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3997                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3998                         sizeof(struct cstorm_status_block)/4);
3999 }
4000
4001 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4002                           dma_addr_t mapping, int sb_id)
4003 {
4004         int port = BP_PORT(bp);
4005         int func = BP_FUNC(bp);
4006         int index;
4007         u64 section;
4008
4009         /* USTORM */
4010         section = ((u64)mapping) + offsetof(struct host_status_block,
4011                                             u_status_block);
4012         sb->u_status_block.status_block_id = sb_id;
4013
4014         REG_WR(bp, BAR_USTRORM_INTMEM +
4015                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4016         REG_WR(bp, BAR_USTRORM_INTMEM +
4017                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4018                U64_HI(section));
4019         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4020                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4021
4022         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4023                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4024                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4025
4026         /* CSTORM */
4027         section = ((u64)mapping) + offsetof(struct host_status_block,
4028                                             c_status_block);
4029         sb->c_status_block.status_block_id = sb_id;
4030
4031         REG_WR(bp, BAR_CSTRORM_INTMEM +
4032                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4033         REG_WR(bp, BAR_CSTRORM_INTMEM +
4034                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4035                U64_HI(section));
4036         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4037                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4038
4039         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4040                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4041                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4042
4043         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4044 }
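/* A freshly initialized status block starts with host coalescing
 * disabled on every index (the REG_WR16(..., 1) writes above);
 * bnx2x_update_coalesce() below re-enables the Rx/Tx CQ indices
 * when rx_ticks/tx_ticks are non-zero.
 */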
4045
4046 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4047 {
4048         int func = BP_FUNC(bp);
4049
4050         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4051                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052                         sizeof(struct ustorm_def_status_block)/4);
4053         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4054                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4055                         sizeof(struct cstorm_def_status_block)/4);
4056         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4057                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4058                         sizeof(struct xstorm_def_status_block)/4);
4059         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4060                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061                         sizeof(struct tstorm_def_status_block)/4);
4062 }
4063
4064 static void bnx2x_init_def_sb(struct bnx2x *bp,
4065                               struct host_def_status_block *def_sb,
4066                               dma_addr_t mapping, int sb_id)
4067 {
4068         int port = BP_PORT(bp);
4069         int func = BP_FUNC(bp);
4070         int index, val, reg_offset;
4071         u64 section;
4072
4073         /* ATTN */
4074         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4075                                             atten_status_block);
4076         def_sb->atten_status_block.status_block_id = sb_id;
4077
4078         bp->attn_state = 0;
4079
4080         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4081                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4082
4083         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4084                 bp->attn_group[index].sig[0] = REG_RD(bp,
4085                                                      reg_offset + 0x10*index);
4086                 bp->attn_group[index].sig[1] = REG_RD(bp,
4087                                                reg_offset + 0x4 + 0x10*index);
4088                 bp->attn_group[index].sig[2] = REG_RD(bp,
4089                                                reg_offset + 0x8 + 0x10*index);
4090                 bp->attn_group[index].sig[3] = REG_RD(bp,
4091                                                reg_offset + 0xc + 0x10*index);
4092         }
4093
4094         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4095                              HC_REG_ATTN_MSG0_ADDR_L);
4096
4097         REG_WR(bp, reg_offset, U64_LO(section));
4098         REG_WR(bp, reg_offset + 4, U64_HI(section));
4099
4100         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4101
4102         val = REG_RD(bp, reg_offset);
4103         val |= sb_id;
4104         REG_WR(bp, reg_offset, val);
4105
4106         /* USTORM */
4107         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4108                                             u_def_status_block);
4109         def_sb->u_def_status_block.status_block_id = sb_id;
4110
4111         REG_WR(bp, BAR_USTRORM_INTMEM +
4112                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4113         REG_WR(bp, BAR_USTRORM_INTMEM +
4114                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4115                U64_HI(section));
4116         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4117                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4118
4119         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4120                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4121                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4122
4123         /* CSTORM */
4124         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4125                                             c_def_status_block);
4126         def_sb->c_def_status_block.status_block_id = sb_id;
4127
4128         REG_WR(bp, BAR_CSTRORM_INTMEM +
4129                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4130         REG_WR(bp, BAR_CSTRORM_INTMEM +
4131                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4132                U64_HI(section));
4133         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4134                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4135
4136         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4137                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4138                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4139
4140         /* TSTORM */
4141         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4142                                             t_def_status_block);
4143         def_sb->t_def_status_block.status_block_id = sb_id;
4144
4145         REG_WR(bp, BAR_TSTRORM_INTMEM +
4146                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4147         REG_WR(bp, BAR_TSTRORM_INTMEM +
4148                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4149                U64_HI(section));
4150         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4151                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4152
4153         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4154                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4155                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4156
4157         /* XSTORM */
4158         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4159                                             x_def_status_block);
4160         def_sb->x_def_status_block.status_block_id = sb_id;
4161
4162         REG_WR(bp, BAR_XSTRORM_INTMEM +
4163                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4164         REG_WR(bp, BAR_XSTRORM_INTMEM +
4165                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4166                U64_HI(section));
4167         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4168                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4169
4170         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4171                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4172                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4173
4174         bp->stats_pending = 0;
4175         bp->set_mac_pending = 0;
4176
4177         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4178 }
4179
4180 static void bnx2x_update_coalesce(struct bnx2x *bp)
4181 {
4182         int port = BP_PORT(bp);
4183         int i;
4184
4185         for_each_queue(bp, i) {
4186                 int sb_id = bp->fp[i].sb_id;
4187
4188                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4189                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4190                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4191                                                     U_SB_ETH_RX_CQ_INDEX),
4192                         bp->rx_ticks/12);
4193                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4194                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4195                                                      U_SB_ETH_RX_CQ_INDEX),
4196                          bp->rx_ticks ? 0 : 1);
4197                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4198                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4199                                                      U_SB_ETH_RX_BD_INDEX),
4200                          bp->rx_ticks ? 0 : 1);
4201
4202                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4203                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4204                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4205                                                     C_SB_ETH_TX_CQ_INDEX),
4206                         bp->tx_ticks/12);
4207                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4208                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4209                                                      C_SB_ETH_TX_CQ_INDEX),
4210                          bp->tx_ticks ? 0 : 1);
4211         }
4212 }
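/* Worked example, assuming rx_ticks/tx_ticks are in microseconds and
 * the HC timeout field counts 12 us units (which the /12 implies):
 * rx_ticks == 25 programs a timeout of 2, i.e. ~24 us of coalescing,
 * while rx_ticks == 0 sets the per-index disable flag instead.
 */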
4213
4214 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4215                                        struct bnx2x_fastpath *fp, int last)
4216 {
4217         int i;
4218
4219         for (i = 0; i < last; i++) {
4220                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4221                 struct sk_buff *skb = rx_buf->skb;
4222
4223                 if (skb == NULL) {
4224                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4225                         continue;
4226                 }
4227
4228                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4229                         pci_unmap_single(bp->pdev,
4230                                          pci_unmap_addr(rx_buf, mapping),
4231                                          bp->rx_buf_use_size,
4232                                          PCI_DMA_FROMDEVICE);
4233
4234                 dev_kfree_skb(skb);
4235                 rx_buf->skb = NULL;
4236         }
4237 }
4238
4239 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4240 {
4241         int func = BP_FUNC(bp);
4242         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4243                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4244         u16 ring_prod, cqe_ring_prod;
4245         int i, j;
4246
4247         bp->rx_buf_use_size = bp->dev->mtu;
4248         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4249         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4250
4251         if (bp->flags & TPA_ENABLE_FLAG) {
4252                 DP(NETIF_MSG_IFUP,
4253                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4254                    bp->rx_buf_use_size, bp->rx_buf_size,
4255                    bp->dev->mtu + ETH_OVREHEAD);
4256
4257                 for_each_queue(bp, j) {
4258                         struct bnx2x_fastpath *fp = &bp->fp[j];
4259
4260                         for (i = 0; i < max_agg_queues; i++) {
4261                                 fp->tpa_pool[i].skb =
4262                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4263                                 if (!fp->tpa_pool[i].skb) {
4264                                         BNX2X_ERR("Failed to allocate TPA "
4265                                                   "skb pool for queue[%d] - "
4266                                                   "disabling TPA on this "
4267                                                   "queue!\n", j);
4268                                         bnx2x_free_tpa_pool(bp, fp, i);
4269                                         fp->disable_tpa = 1;
4270                                         break;
4271                                 }
4272                                 /* use this queue's fp, not bp->fp[0] */
4273                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4274                                                    mapping, 0);
4275                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4276                         }
4277                 }
4278         }
4279
4280         for_each_queue(bp, j) {
4281                 struct bnx2x_fastpath *fp = &bp->fp[j];
4282
4283                 fp->rx_bd_cons = 0;
4284                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4285                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4286
4287                 /* "next page" elements initialization */
4288                 /* SGE ring */
4289                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4290                         struct eth_rx_sge *sge;
4291
4292                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4293                         sge->addr_hi =
4294                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4295                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4296                         sge->addr_lo =
4297                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4298                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4299                 }
4300
4301                 bnx2x_init_sge_ring_bit_mask(fp);
4302
4303                 /* RX BD ring */
4304                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4305                         struct eth_rx_bd *rx_bd;
4306
4307                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4308                         rx_bd->addr_hi =
4309                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4310                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4311                         rx_bd->addr_lo =
4312                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4313                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4314                 }
4315
4316                 /* CQ ring */
4317                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4318                         struct eth_rx_cqe_next_page *nextpg;
4319
4320                         nextpg = (struct eth_rx_cqe_next_page *)
4321                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4322                         nextpg->addr_hi =
4323                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4324                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4325                         nextpg->addr_lo =
4326                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4327                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4328                 }
4329
4330                 /* Allocate SGEs and initialize the ring elements */
4331                 for (i = 0, ring_prod = 0;
4332                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4333
4334                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4335                                 BNX2X_ERR("was only able to allocate "
4336                                           "%d rx sges\n", i);
4337                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4338                                 /* Cleanup already allocated elements */
4339                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4340                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4341                                 fp->disable_tpa = 1;
4342                                 ring_prod = 0;
4343                                 break;
4344                         }
4345                         ring_prod = NEXT_SGE_IDX(ring_prod);
4346                 }
4347                 fp->rx_sge_prod = ring_prod;
4348
4349                 /* Allocate BDs and initialize BD ring */
4350                 fp->rx_comp_cons = 0;
4351                 cqe_ring_prod = ring_prod = 0;
4352                 for (i = 0; i < bp->rx_ring_size; i++) {
4353                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4354                                 BNX2X_ERR("was only able to allocate "
4355                                           "%d rx skbs\n", i);
4356                                 bp->eth_stats.rx_skb_alloc_failed++;
4357                                 break;
4358                         }
4359                         ring_prod = NEXT_RX_IDX(ring_prod);
4360                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4361                         WARN_ON(ring_prod <= i);
4362                 }
4363
4364                 fp->rx_bd_prod = ring_prod;
4365                 /* must not have more available CQEs than BDs */
4366                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4367                                        cqe_ring_prod);
4368                 fp->rx_pkt = fp->rx_calls = 0;
4369
4370                 /* Warning!
4371                  * This will generate an interrupt (to the TSTORM);
4372                  * it must only be done after the chip is initialized.
4373                  */
4374                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4375                                      fp->rx_sge_prod);
4376                 if (j != 0)
4377                         continue;
4378
4379                 REG_WR(bp, BAR_USTRORM_INTMEM +
4380                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4381                        U64_LO(fp->rx_comp_mapping));
4382                 REG_WR(bp, BAR_USTRORM_INTMEM +
4383                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4384                        U64_HI(fp->rx_comp_mapping));
4385         }
4386 }
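/* Example of the "next page" chaining above, assuming two pages
 * (NUM_RX_RINGS == 2): the BD at index RX_DESC_CNT*1 - 2 of page 0
 * points to page 1 (1 % 2), and the BD at index RX_DESC_CNT*2 - 2
 * of page 1 points back to page 0 (2 % 2), so producers walk the
 * chain without special-casing the wrap.
 */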
4387
4388 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4389 {
4390         int i, j;
4391
4392         for_each_queue(bp, j) {
4393                 struct bnx2x_fastpath *fp = &bp->fp[j];
4394
4395                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4396                         struct eth_tx_bd *tx_bd =
4397                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4398
4399                         tx_bd->addr_hi =
4400                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4401                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4402                         tx_bd->addr_lo =
4403                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4404                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4405                 }
4406
4407                 fp->tx_pkt_prod = 0;
4408                 fp->tx_pkt_cons = 0;
4409                 fp->tx_bd_prod = 0;
4410                 fp->tx_bd_cons = 0;
4411                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4412                 fp->tx_pkt = 0;
4413         }
4414 }
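/* Note the asymmetry with the Rx rings above: the Tx chain keeps its
 * "next page" pointer in the last BD of each page (TX_DESC_CNT*i - 1),
 * while the Rx BD and SGE chains reserve the second-to-last entry
 * (index - 2) and the CQ chain the last (index - 1); the
 * i % NUM_*_RINGS wrap rule is identical everywhere.
 */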
4415
4416 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4417 {
4418         int func = BP_FUNC(bp);
4419
4420         spin_lock_init(&bp->spq_lock);
4421
4422         bp->spq_left = MAX_SPQ_PENDING;
4423         bp->spq_prod_idx = 0;
4424         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4425         bp->spq_prod_bd = bp->spq;
4426         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4427
4428         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4429                U64_LO(bp->spq_mapping));
4430         REG_WR(bp,
4431                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4432                U64_HI(bp->spq_mapping));
4433
4434         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4435                bp->spq_prod_idx);
4436 }
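/* The slowpath queue is a single page of MAX_SP_DESC_CNT entries:
 * spq_prod_bd walks from bp->spq to spq_last_bd and wraps, while
 * spq_left (initially MAX_SPQ_PENDING) counts free slots so
 * bnx2x_sp_post() can refuse new ramrods when the queue is full.
 */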
4437
4438 static void bnx2x_init_context(struct bnx2x *bp)
4439 {
4440         int i;
4441
4442         for_each_queue(bp, i) {
4443                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4444                 struct bnx2x_fastpath *fp = &bp->fp[i];
4445                 u8 sb_id = FP_SB_ID(fp);
4446
4447                 context->xstorm_st_context.tx_bd_page_base_hi =
4448                                                 U64_HI(fp->tx_desc_mapping);
4449                 context->xstorm_st_context.tx_bd_page_base_lo =
4450                                                 U64_LO(fp->tx_desc_mapping);
4451                 context->xstorm_st_context.db_data_addr_hi =
4452                                                 U64_HI(fp->tx_prods_mapping);
4453                 context->xstorm_st_context.db_data_addr_lo =
4454                                                 U64_LO(fp->tx_prods_mapping);
4455                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4456                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4457
4458                 context->ustorm_st_context.common.sb_index_numbers =
4459                                                 BNX2X_RX_SB_INDEX_NUM;
4460                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4461                 context->ustorm_st_context.common.status_block_id = sb_id;
4462                 context->ustorm_st_context.common.flags =
4463                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4464                 context->ustorm_st_context.common.mc_alignment_size = 64;
4465                 context->ustorm_st_context.common.bd_buff_size =
4466                                                 bp->rx_buf_use_size;
4467                 context->ustorm_st_context.common.bd_page_base_hi =
4468                                                 U64_HI(fp->rx_desc_mapping);
4469                 context->ustorm_st_context.common.bd_page_base_lo =
4470                                                 U64_LO(fp->rx_desc_mapping);
4471                 if (!fp->disable_tpa) {
4472                         context->ustorm_st_context.common.flags |=
4473                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4474                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4475                         context->ustorm_st_context.common.sge_buff_size =
4476                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4477                         context->ustorm_st_context.common.sge_page_base_hi =
4478                                                 U64_HI(fp->rx_sge_mapping);
4479                         context->ustorm_st_context.common.sge_page_base_lo =
4480                                                 U64_LO(fp->rx_sge_mapping);
4481                 }
4482
4483                 context->cstorm_st_context.sb_index_number =
4484                                                 C_SB_ETH_TX_CQ_INDEX;
4485                 context->cstorm_st_context.status_block_id = sb_id;
4486
4487                 context->xstorm_ag_context.cdu_reserved =
4488                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4489                                                CDU_REGION_NUMBER_XCM_AG,
4490                                                ETH_CONNECTION_TYPE);
4491                 context->ustorm_ag_context.cdu_usage =
4492                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4493                                                CDU_REGION_NUMBER_UCM_AG,
4494                                                ETH_CONNECTION_TYPE);
4495         }
4496 }
4497
4498 static void bnx2x_init_ind_table(struct bnx2x *bp)
4499 {
4500         int port = BP_PORT(bp);
4501         int i;
4502
4503         if (!is_multi(bp))
4504                 return;
4505
4506         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4507         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4508                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4509                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4510                         i % bp->num_queues);
4511
4512         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4513 }
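/* Illustration with assumed sizes (128-entry table, num_queues == 4):
 * the loop programs table[i] = i % 4, i.e. 0,1,2,3,0,1,2,3,..., so
 * RSS hash results are spread round-robin across the active queues.
 */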
4514
4515 static void bnx2x_set_client_config(struct bnx2x *bp)
4516 {
4517         struct tstorm_eth_client_config tstorm_client = {0};
4518         int port = BP_PORT(bp);
4519         int i;
4520
4521         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4522         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4523         tstorm_client.config_flags =
4524                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4525 #ifdef BCM_VLAN
4526         if (bp->rx_mode && bp->vlgrp) {
4527                 tstorm_client.config_flags |=
4528                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4529                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4530         }
4531 #endif
4532
4533         if (bp->flags & TPA_ENABLE_FLAG) {
4534                 tstorm_client.max_sges_for_packet =
4535                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4536                 tstorm_client.max_sges_for_packet =
4537                         ((tstorm_client.max_sges_for_packet +
4538                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4539                         PAGES_PER_SGE_SHIFT;
4540
4541                 tstorm_client.config_flags |=
4542                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4543         }
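                /* Worked example with assumed values (9000 byte MTU,
                 * 4K BCM_PAGE_SIZE, PAGES_PER_SGE == 2): the aligned
                 * frame spans 3 pages, and ((3 + 2 - 1) & ~1) >> 1 == 2,
                 * so two SGEs per aggregated packet.
                 */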
4544
4545         for_each_queue(bp, i) {
4546                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4547                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4548                        ((u32 *)&tstorm_client)[0]);
4549                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4550                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4551                        ((u32 *)&tstorm_client)[1]);
4552         }
4553
4554         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4555            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4556 }
4557
4558 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4559 {
4560         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4561         int mode = bp->rx_mode;
4562         int mask = (1 << BP_L_ID(bp));
4563         int func = BP_FUNC(bp);
4564         int i;
4565
4566         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4567
4568         switch (mode) {
4569         case BNX2X_RX_MODE_NONE: /* no Rx */
4570                 tstorm_mac_filter.ucast_drop_all = mask;
4571                 tstorm_mac_filter.mcast_drop_all = mask;
4572                 tstorm_mac_filter.bcast_drop_all = mask;
4573                 break;
4574         case BNX2X_RX_MODE_NORMAL:
4575                 tstorm_mac_filter.bcast_accept_all = mask;
4576                 break;
4577         case BNX2X_RX_MODE_ALLMULTI:
4578                 tstorm_mac_filter.mcast_accept_all = mask;
4579                 tstorm_mac_filter.bcast_accept_all = mask;
4580                 break;
4581         case BNX2X_RX_MODE_PROMISC:
4582                 tstorm_mac_filter.ucast_accept_all = mask;
4583                 tstorm_mac_filter.mcast_accept_all = mask;
4584                 tstorm_mac_filter.bcast_accept_all = mask;
4585                 break;
4586         default:
4587                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4588                 break;
4589         }
4590
4591         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4592                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4593                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4594                        ((u32 *)&tstorm_mac_filter)[i]);
4595
4596 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4597                    ((u32 *)&tstorm_mac_filter)[i]); */
4598         }
4599
4600         if (mode != BNX2X_RX_MODE_NONE)
4601                 bnx2x_set_client_config(bp);
4602 }
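/* Each *_drop_all / *_accept_all field above is a per-client bitmask;
 * mask == (1 << BP_L_ID(bp)) selects only this function's leading
 * client bit, so Rx filtering is configured per client rather than
 * chip-wide.
 */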
4603
4604 static void bnx2x_init_internal_common(struct bnx2x *bp)
4605 {
4606         int i;
4607
4608         /* Zero this manually as its initialization is
4609            currently missing in the initTool */
4610         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4611                 REG_WR(bp, BAR_USTRORM_INTMEM +
4612                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4613 }
4614
4615 static void bnx2x_init_internal_port(struct bnx2x *bp)
4616 {
4617         int port = BP_PORT(bp);
4618
4619         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4620         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 }
4624
4625 static void bnx2x_init_internal_func(struct bnx2x *bp)
4626 {
4627         struct tstorm_eth_function_common_config tstorm_config = {0};
4628         struct stats_indication_flags stats_flags = {0};
4629         int port = BP_PORT(bp);
4630         int func = BP_FUNC(bp);
4631         int i;
4632         u16 max_agg_size;
4633
4634         if (is_multi(bp)) {
4635                 tstorm_config.config_flags = MULTI_FLAGS;
4636                 tstorm_config.rss_result_mask = MULTI_MASK;
4637         }
4638
4639         tstorm_config.leading_client_id = BP_L_ID(bp);
4640
4641         REG_WR(bp, BAR_TSTRORM_INTMEM +
4642                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4643                (*(u32 *)&tstorm_config));
4644
4645         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4646         bnx2x_set_storm_rx_mode(bp);
4647
4648         /* reset xstorm per client statistics */
4649         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4650                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4651                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4652                        i*4, 0);
4653         }
4654         /* reset tstorm per client statistics */
4655         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4656                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4657                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4658                        i*4, 0);
4659         }
4660
4661         /* Init statistics related context */
4662         stats_flags.collect_eth = 1;
4663
4664         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4665                ((u32 *)&stats_flags)[0]);
4666         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4667                ((u32 *)&stats_flags)[1]);
4668
4669         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4670                ((u32 *)&stats_flags)[0]);
4671         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4672                ((u32 *)&stats_flags)[1]);
4673
4674         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4675                ((u32 *)&stats_flags)[0]);
4676         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4677                ((u32 *)&stats_flags)[1]);
4678
4679         REG_WR(bp, BAR_XSTRORM_INTMEM +
4680                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682         REG_WR(bp, BAR_XSTRORM_INTMEM +
4683                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4685
4686         REG_WR(bp, BAR_TSTRORM_INTMEM +
4687                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4688                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4689         REG_WR(bp, BAR_TSTRORM_INTMEM +
4690                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4691                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4692
4693         if (CHIP_IS_E1H(bp)) {
4694                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4695                         IS_E1HMF(bp));
4696                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4697                         IS_E1HMF(bp));
4698                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4699                         IS_E1HMF(bp));
4700                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4701                         IS_E1HMF(bp));
4702
4703                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4704                          bp->e1hov);
4705         }
4706
4707         /* Init CQ ring mapping and aggregation size */
4708         max_agg_size = min((u32)(bp->rx_buf_use_size +
4709                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4710                            (u32)0xffff);
4711         for_each_queue(bp, i) {
4712                 struct bnx2x_fastpath *fp = &bp->fp[i];
4713
4714                 REG_WR(bp, BAR_USTRORM_INTMEM +
4715                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4716                        U64_LO(fp->rx_comp_mapping));
4717                 REG_WR(bp, BAR_USTRORM_INTMEM +
4718                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4719                        U64_HI(fp->rx_comp_mapping));
4720
4721                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4722                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4723                          max_agg_size);
4724         }
4725 }
4726
4727 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4728 {
4729         switch (load_code) {
4730         case FW_MSG_CODE_DRV_LOAD_COMMON:
4731                 bnx2x_init_internal_common(bp);
4732                 /* no break */
4733
4734         case FW_MSG_CODE_DRV_LOAD_PORT:
4735                 bnx2x_init_internal_port(bp);
4736                 /* no break */
4737
4738         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4739                 bnx2x_init_internal_func(bp);
4740                 break;
4741
4742         default:
4743                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4744                 break;
4745         }
4746 }
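/* The deliberate fall-through above means DRV_LOAD_COMMON (first
 * driver on the chip) runs common + port + function init,
 * DRV_LOAD_PORT (first driver on this port) runs port + function
 * init, and DRV_LOAD_FUNCTION only re-initializes per-function state.
 */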
4747
4748 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4749 {
4750         int i;
4751
4752         for_each_queue(bp, i) {
4753                 struct bnx2x_fastpath *fp = &bp->fp[i];
4754
4755                 fp->bp = bp;
4756                 fp->state = BNX2X_FP_STATE_CLOSED;
4757                 fp->index = i;
4758                 fp->cl_id = BP_L_ID(bp) + i;
4759                 fp->sb_id = fp->cl_id;
4760                 DP(NETIF_MSG_IFUP,
4761                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4762                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4763                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4764                               FP_SB_ID(fp));
4765                 bnx2x_update_fpsb_idx(fp);
4766         }
4767
4768         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4769                           DEF_SB_ID);
4770         bnx2x_update_dsb_idx(bp);
4771         bnx2x_update_coalesce(bp);
4772         bnx2x_init_rx_rings(bp);
4773         bnx2x_init_tx_ring(bp);
4774         bnx2x_init_sp_ring(bp);
4775         bnx2x_init_context(bp);
4776         bnx2x_init_internal(bp, load_code);
4777         bnx2x_init_ind_table(bp);
4778         bnx2x_int_enable(bp);
4779 }
4780
4781 /* end of nic init */
4782
4783 /*
4784  * gzip service functions
4785  */
4786
4787 static int bnx2x_gunzip_init(struct bnx2x *bp)
4788 {
4789         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4790                                               &bp->gunzip_mapping);
4791         if (bp->gunzip_buf  == NULL)
4792                 goto gunzip_nomem1;
4793
4794         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4795         if (bp->strm  == NULL)
4796                 goto gunzip_nomem2;
4797
4798         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4799                                       GFP_KERNEL);
4800         if (bp->strm->workspace == NULL)
4801                 goto gunzip_nomem3;
4802
4803         return 0;
4804
4805 gunzip_nomem3:
4806         kfree(bp->strm);
4807         bp->strm = NULL;
4808
4809 gunzip_nomem2:
4810         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4811                             bp->gunzip_mapping);
4812         bp->gunzip_buf = NULL;
4813
4814 gunzip_nomem1:
4815         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4816                " decompression\n", bp->dev->name);
4817         return -ENOMEM;
4818 }
4819
4820 static void bnx2x_gunzip_end(struct bnx2x *bp)
4821 {
4822         kfree(bp->strm->workspace);
4823
4824         kfree(bp->strm);
4825         bp->strm = NULL;
4826
4827         if (bp->gunzip_buf) {
4828                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4829                                     bp->gunzip_mapping);
4830                 bp->gunzip_buf = NULL;
4831         }
4832 }
4833
4834 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4835 {
4836         int n, rc;
4837
4838         /* check gzip header */
4839         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4840                 return -EINVAL;
4841
4842         n = 10;
4843
4844 #define FNAME                           0x8
4845
4846         if (zbuf[3] & FNAME)
4847                 while ((n < len) && (zbuf[n++] != 0));
4848
4849         bp->strm->next_in = zbuf + n;
4850         bp->strm->avail_in = len - n;
4851         bp->strm->next_out = bp->gunzip_buf;
4852         bp->strm->avail_out = FW_BUF_SIZE;
4853
4854         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4855         if (rc != Z_OK)
4856                 return rc;
4857
4858         rc = zlib_inflate(bp->strm, Z_FINISH);
4859         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4860                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4861                        bp->dev->name, bp->strm->msg);
4862
4863         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4864         if (bp->gunzip_outlen & 0x3)
4865                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4866                                     " gunzip_outlen (%d) not aligned\n",
4867                        bp->dev->name, bp->gunzip_outlen);
4868         bp->gunzip_outlen >>= 2;
4869
4870         zlib_inflateEnd(bp->strm);
4871
4872         if (rc == Z_STREAM_END)
4873                 return 0;
4874
4875         return rc;
4876 }
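/* The header checked above follows RFC 1952: bytes 0-1 are the gzip
 * magic (0x1f 0x8b), byte 2 the method (8 == deflate), byte 3 the
 * flags; FNAME (0x08) means a NUL-terminated original file name
 * follows the fixed 10-byte header and must be skipped, as the while
 * loop does.  Passing -MAX_WBITS to zlib_inflateInit2() tells zlib to
 * inflate a raw deflate stream with no zlib wrapper.
 */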
4877
4878 /* nic load/unload */
4879
4880 /*
4881  * General service functions
4882  */
4883
4884 /* send a NIG loopback debug packet */
4885 static void bnx2x_lb_pckt(struct bnx2x *bp)
4886 {
4887         u32 wb_write[3];
4888
4889         /* Ethernet source and destination addresses */
4890         wb_write[0] = 0x55555555;
4891         wb_write[1] = 0x55555555;
4892         wb_write[2] = 0x20;             /* SOP */
4893         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4894
4895         /* NON-IP protocol */
4896         wb_write[0] = 0x09000000;
4897         wb_write[1] = 0x55555555;
4898         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4899         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4900 }
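/* The two DMAE writes above push one minimal 16-byte (0x10) frame
 * into the NIG debug-packet interface: eight bytes of dummy MAC
 * addresses with an SOP control word (0x20), then eight more bytes
 * marking a non-IP payload with an EOP control word (0x10), so the
 * parser counts the packet without checksumming a real payload.
 */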
4901
4902 /* Some of the internal memories are not directly readable
4903  * from the driver, so to test them we send debug packets
4904  * and check the NIG/PRS packet counters.
4905  */
4906 static int bnx2x_int_mem_test(struct bnx2x *bp)
4907 {
4908         int factor;
4909         int count, i;
4910         u32 val = 0;
4911
4912         if (CHIP_REV_IS_FPGA(bp))
4913                 factor = 120;
4914         else if (CHIP_REV_IS_EMUL(bp))
4915                 factor = 200;
4916         else
4917                 factor = 1;
4918
4919         DP(NETIF_MSG_HW, "start part1\n");
4920
4921         /* Disable inputs of parser neighbor blocks */
4922         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4923         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4924         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4925         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4926
4927         /*  Write 0 to parser credits for CFC search request */
4928         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4929
4930         /* send Ethernet packet */
4931         bnx2x_lb_pckt(bp);
4932
4933         /* TODO: should the NIG statistics be reset here? */
4934         /* Wait until NIG register shows 1 packet of size 0x10 */
4935         count = 1000 * factor;
4936         while (count) {
4937
4938                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4939                 val = *bnx2x_sp(bp, wb_data[0]);
4940                 if (val == 0x10)
4941                         break;
4942
4943                 msleep(10);
4944                 count--;
4945         }
4946         if (val != 0x10) {
4947                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4948                 return -1;
4949         }
4950
4951         /* Wait until PRS register shows 1 packet */
4952         count = 1000 * factor;
4953         while (count) {
4954                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4955                 if (val == 1)
4956                         break;
4957
4958                 msleep(10);
4959                 count--;
4960         }
4961         if (val != 0x1) {
4962                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4963                 return -2;
4964         }
4965
4966         /* Reset and init BRB, PRS */
4967         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4968         msleep(50);
4969         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4970         msleep(50);
4971         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4972         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4973
4974         DP(NETIF_MSG_HW, "part2\n");
4975
4976         /* Disable inputs of parser neighbor blocks */
4977         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4978         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4979         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4980         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4981
4982         /* Write 0 to parser credits for CFC search request */
4983         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4984
4985         /* send 10 Ethernet packets */
4986         for (i = 0; i < 10; i++)
4987                 bnx2x_lb_pckt(bp);
4988
4989         /* Wait until the NIG register shows 10 + 1 packets,
4990            i.e. a total size of 11 * 0x10 = 0xb0 */
4991         count = 1000 * factor;
4992         while (count) {
4993
4994                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4995                 val = *bnx2x_sp(bp, wb_data[0]);
4996                 if (val == 0xb0)
4997                         break;
4998
4999                 msleep(10);
5000                 count--;
5001         }
5002         if (val != 0xb0) {
5003                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5004                 return -3;
5005         }
5006
5007         /* Wait until PRS register shows 2 packets */
5008         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5009         if (val != 2)
5010                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5011
5012         /* Write 1 to parser credits for CFC search request */
5013         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5014
5015         /* Wait until PRS register shows 3 packets */
5016         msleep(10 * factor);
5017         /* then re-read the PRS packet counter */
5018         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5019         if (val != 3)
5020                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5021
5022         /* clear NIG EOP FIFO */
5023         for (i = 0; i < 11; i++)
5024                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5025         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5026         if (val != 1) {
5027                 BNX2X_ERR("clear of NIG failed\n");
5028                 return -4;
5029         }
5030
5031         /* Reset and init BRB, PRS, NIG */
5032         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5033         msleep(50);
5034         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035         msleep(50);
5036         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038 #ifndef BCM_ISCSI
5039         /* set NIC mode */
5040         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5041 #endif
5042
5043         /* Enable inputs of parser neighbor blocks */
5044         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5045         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5046         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5047         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5048
5049         DP(NETIF_MSG_HW, "done\n");
5050
5051         return 0; /* OK */
5052 }
5053
5054 static void enable_blocks_attention(struct bnx2x *bp)
5055 {
5056         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5057         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5058         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5059         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5060         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5061         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5062         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5063         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5064         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5065 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5066 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5067         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5068         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5069         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5070 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5071 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5072         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5073         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5074         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5075         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5076 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5077 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5078         if (CHIP_REV_IS_FPGA(bp))
5079                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5080         else
5081                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5082         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5083         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5084         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5085 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5086 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5087         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5088         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5089 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5090         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5091 }
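/* Writing 0 to a *_INT_MASK register unmasks every attention bit in
 * that block; PBF is the one exception above, keeping bits 3 and 4
 * masked (0x18), and the commented-out SEM/MISC masks are left at
 * their reset values.
 */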
5092
5093
5094 static int bnx2x_init_common(struct bnx2x *bp)
5095 {
5096         u32 val, i;
5097
5098         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5099
5100         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5101         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5102
5103         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5104         if (CHIP_IS_E1H(bp))
5105                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5106
5107         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5108         msleep(30);
5109         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5110
5111         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5112         if (CHIP_IS_E1(bp)) {
5113                 /* enable HW interrupt from PXP on USDM overflow
5114                    bit 16 on INT_MASK_0 */
5115                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5116         }
5117
5118         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5119         bnx2x_init_pxp(bp);
5120
5121 #ifdef __BIG_ENDIAN
5122         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5123         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5124         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5125         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5126         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5127         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5128
5129 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5130         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5131         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5132         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5133         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5134 #endif
5135
5136         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5137 #ifdef BCM_ISCSI
5138         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5139         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5140         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5141 #endif
5142
5143         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5144                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5145
5146         /* let the HW do its magic ... */
5147         msleep(100);
5148         /* finish PXP init */
5149         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5150         if (val != 1) {
5151                 BNX2X_ERR("PXP2 CFG failed\n");
5152                 return -EBUSY;
5153         }
5154         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5155         if (val != 1) {
5156                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5157                 return -EBUSY;
5158         }
5159
5160         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5161         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5162
5163         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5164
5165         /* clean the DMAE memory */
5166         bp->dmae_ready = 1;
5167         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5168
5169         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5170         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5171         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5172         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5173
5174         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5175         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5176         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5177         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5178
5179         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5180         /* soft reset pulse */
5181         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5182         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5183
5184 #ifdef BCM_ISCSI
5185         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5186 #endif
5187
5188         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5189         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5190         if (!CHIP_REV_IS_SLOW(bp)) {
5191                 /* enable hw interrupt from doorbell Q */
5192                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5193         }
5194
5195         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5196         if (CHIP_REV_IS_SLOW(bp)) {
5197                 /* no-pause workaround for emulation and FPGA */
5198                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5199                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5200                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5201                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5202         }
5203
5204         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5205         /* set NIC mode */
5206         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5207         if (CHIP_IS_E1H(bp))
5208                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5209
5210         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5211         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5212         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5213         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5214
5215         if (CHIP_IS_E1H(bp)) {
5216                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5217                                 STORM_INTMEM_SIZE_E1H/2);
5218                 bnx2x_init_fill(bp,
5219                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5220                                 0, STORM_INTMEM_SIZE_E1H/2);
5221                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5222                                 STORM_INTMEM_SIZE_E1H/2);
5223                 bnx2x_init_fill(bp,
5224                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5225                                 0, STORM_INTMEM_SIZE_E1H/2);
5226                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5227                                 STORM_INTMEM_SIZE_E1H/2);
5228                 bnx2x_init_fill(bp,
5229                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5230                                 0, STORM_INTMEM_SIZE_E1H/2);
5231                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5232                                 STORM_INTMEM_SIZE_E1H/2);
5233                 bnx2x_init_fill(bp,
5234                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5235                                 0, STORM_INTMEM_SIZE_E1H/2);
5236         } else { /* E1 */
5237                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5238                                 STORM_INTMEM_SIZE_E1);
5239                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5240                                 STORM_INTMEM_SIZE_E1);
5241                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5242                                 STORM_INTMEM_SIZE_E1);
5243                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5244                                 STORM_INTMEM_SIZE_E1);
5245         }
5246
5247         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5248         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5249         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5250         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5251
5252         /* sync semi rtc */
5253         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5254                0x80000000);
5255         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5256                0x80000000);
5257
5258         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5259         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5260         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5261
5262         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5263         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5264                 REG_WR(bp, i, 0xc0cac01a);
5265                 /* TODO: replace with something meaningful */
5266         }
5267         if (CHIP_IS_E1H(bp))
5268                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5269         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5270
5271         if (sizeof(union cdu_context) != 1024)
5272                 /* we currently assume that a context is 1024 bytes */
5273                 printk(KERN_ALERT PFX "please adjust the size of"
5274                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5275
5276         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5277         val = (4 << 24) + (0 << 12) + 1024;
5278         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5279         if (CHIP_IS_E1(bp)) {
5280                 /* !!! fix PXP client credit until excel update */
5281                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5282                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5283         }
5284
5285         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5286         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5287
5288         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5289         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5290
5291         /* PXPCS COMMON comes here */
5292         /* Reset PCIE errors for debug */
5293         REG_WR(bp, 0x2814, 0xffffffff);
5294         REG_WR(bp, 0x3820, 0xffffffff);
5295
5296         /* EMAC0 COMMON comes here */
5297         /* EMAC1 COMMON comes here */
5298         /* DBU COMMON comes here */
5299         /* DBG COMMON comes here */
5300
5301         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5302         if (CHIP_IS_E1H(bp)) {
5303                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5304                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5305         }
5306
5307         if (CHIP_REV_IS_SLOW(bp))
5308                 msleep(200);
5309
5310         /* finish CFC init */
5311         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5312         if (val != 1) {
5313                 BNX2X_ERR("CFC LL_INIT failed\n");
5314                 return -EBUSY;
5315         }
5316         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5317         if (val != 1) {
5318                 BNX2X_ERR("CFC AC_INIT failed\n");
5319                 return -EBUSY;
5320         }
5321         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5322         if (val != 1) {
5323                 BNX2X_ERR("CFC CAM_INIT failed\n");
5324                 return -EBUSY;
5325         }
5326         REG_WR(bp, CFC_REG_DEBUG0, 0);
5327
5328         /* read NIG statistic
5329            to see whether this is our first load since power-up */
5330         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5331         val = *bnx2x_sp(bp, wb_data[0]);
5332
5333         /* do internal memory self test */
5334         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5335                 BNX2X_ERR("internal mem self test failed\n");
5336                 return -EBUSY;
5337         }
5338
5339         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5340         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5341                 /* Fan failure is indicated by SPIO 5 */
5342                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5343                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5344
5345                 /* set to active low mode */
5346                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5347                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5348                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5349                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5350
5351                 /* enable interrupt to signal the IGU */
5352                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5353                 val |= (1 << MISC_REGISTERS_SPIO_5);
5354                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5355                 break;
5356
5357         default:
5358                 break;
5359         }
5360
5361         /* clear PXP2 attentions */
5362         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5363
5364         enable_blocks_attention(bp);
5365
5366         if (bp->flags & TPA_ENABLE_FLAG) {
5367                 struct tstorm_eth_tpa_exist tmp = {0};
5368
5369                 tmp.tpa_exist = 1;
5370
5371                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5372                        ((u32 *)&tmp)[0]);
5373                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5374                        ((u32 *)&tmp)[1]);
5375         }
5376
5377         if (!BP_NOMCP(bp)) {
5378                 bnx2x_acquire_phy_lock(bp);
5379                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5380                 bnx2x_release_phy_lock(bp);
5381         } else
5382                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5383
5384         return 0;
5385 }
5386
5387 static int bnx2x_init_port(struct bnx2x *bp)
5388 {
5389         int port = BP_PORT(bp);
5390         u32 val;
5391
5392         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5393
5394         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5395
5396         /* Port PXP comes here */
5397         /* Port PXP2 comes here */
5398 #ifdef BCM_ISCSI
5399         /* Port0  1
5400          * Port1  385 */
5401         i++;
5402         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5403         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5404         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5405         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5406
5407         /* Port0  2
5408          * Port1  386 */
5409         i++;
5410         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5411         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5412         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5413         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5414
5415         /* Port0  3
5416          * Port1  387 */
5417         i++;
5418         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5419         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5420         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5421         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5422 #endif
5423         /* Port CMs come here */
5424
5425         /* Port QM comes here */
5426 #ifdef BCM_ISCSI
5427         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5428         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5429
5430         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5431                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5432 #endif
5433         /* Port DQ comes here */
5434         /* Port BRB1 comes here */
5435         /* Port PRS comes here */
5436         /* Port TSDM comes here */
5437         /* Port CSDM comes here */
5438         /* Port USDM comes here */
5439         /* Port XSDM comes here */
5440         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5441                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5442         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5443                              port ? USEM_PORT1_END : USEM_PORT0_END);
5444         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5445                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5446         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5447                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5448         /* Port UPB comes here */
5449         /* Port XPB comes here */
5450
5451         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5452                              port ? PBF_PORT1_END : PBF_PORT0_END);
5453
5454         /* configure PBF to work without PAUSE, MTU 9000 */
5455         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5456
5457         /* update threshold */
5458         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5459         /* update init credit */
5460         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
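        /* the arithmetic above: one max-size frame (9040 bytes, i.e. the
         * 9000-byte MTU plus overhead) expressed in 16-byte units; the
         * 553 and 22 terms are chip-specific credit constants taken
         * as-is from the init sequence, not derived here */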
5461
5462         /* probe changes */
5463         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5464         msleep(5);
5465         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5466
5467 #ifdef BCM_ISCSI
5468         /* tell the searcher where the T2 table is */
5469         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5470
5471         wb_write[0] = U64_LO(bp->t2_mapping);
5472         wb_write[1] = U64_HI(bp->t2_mapping);
5473         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5474         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5475         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5476         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5477
5478         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5479         /* Port SRCH comes here */
5480 #endif
5481         /* Port CDU comes here */
5482         /* Port CFC comes here */
5483
5484         if (CHIP_IS_E1(bp)) {
5485                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5486                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5487         }
5488         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5489                              port ? HC_PORT1_END : HC_PORT0_END);
5490
5491         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5492                                     MISC_AEU_PORT0_START,
5493                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5494         /* init aeu_mask_attn_func_0/1:
5495          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5496          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5497          *             bits 4-7 are used for "per vn group attention" */
5498         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5499                (IS_E1HMF(bp) ? 0xF7 : 0x7));
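        /* i.e. 0x7 unmasks attention bits 0-2 only (SF mode), while 0xF7
         * additionally unmasks bits 4-7 for the per-vn group attentions;
         * bit 3 stays masked in both modes */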
5500
5501         /* Port PXPCS comes here */
5502         /* Port EMAC0 comes here */
5503         /* Port EMAC1 comes here */
5504         /* Port DBU comes here */
5505         /* Port DBG comes here */
5506         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5507                              port ? NIG_PORT1_END : NIG_PORT0_END);
5508
5509         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5510
5511         if (CHIP_IS_E1H(bp)) {
5512                 u32 wsum;
5513                 struct cmng_struct_per_port m_cmng_port;
5514                 int vn;
5515
5516                 /* 0x2 disable e1hov, 0x1 enable */
5517                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5518                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5519
5520                 /* Init RATE SHAPING and FAIRNESS contexts.
5521                    Initialize as if there is a 10G link. */
5522                 wsum = bnx2x_calc_vn_wsum(bp);
5523                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5524                 if (IS_E1HMF(bp))
5525                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5526                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5527                                         wsum, 10000, &m_cmng_port);
5528         }
5529
5530         /* Port MCP comes here */
5531         /* Port DMAE comes here */
5532
5533         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5534         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5535                 /* add SPIO 5 to group 0 */
5536                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5537                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5538                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5539                 break;
5540
5541         default:
5542                 break;
5543         }
5544
5545         bnx2x__link_reset(bp);
5546
5547         return 0;
5548 }
5549
5550 #define ILT_PER_FUNC            (768/2)
5551 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5552 /* the phys address is shifted right 12 bits and a valid bit (1)
5553    is added in the 53rd bit;
5554    then since this is a wide register(TM)
5555    we split it into two 32 bit writes
5556  */
5557 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5558 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5559 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5560 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
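/* illustrative example of the encodings above: for a (hypothetical)
 * DMA address 0x0000123456789000, ONCHIP_ADDR1() yields 0x23456789
 * (low 32 bits of addr >> 12) and ONCHIP_ADDR2() yields 0x00100001
 * (valid bit 20 set, addr >> 44 == 1); FUNC_ILT_BASE(1) is 384, which
 * matches the "Port1  385" ILT lines used in bnx2x_init_port() above */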
5561
5562 #define CNIC_ILT_LINES          0
5563
5564 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5565 {
5566         int reg;
5567
5568         if (CHIP_IS_E1H(bp))
5569                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5570         else /* E1 */
5571                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5572
5573         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5574 }
5575
5576 static int bnx2x_init_func(struct bnx2x *bp)
5577 {
5578         int port = BP_PORT(bp);
5579         int func = BP_FUNC(bp);
5580         int i;
5581
5582         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5583
5584         i = FUNC_ILT_BASE(func);
5585
5586         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5587         if (CHIP_IS_E1H(bp)) {
5588                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5589                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5590         } else /* E1 */
5591                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5592                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5593
5594
5595         if (CHIP_IS_E1H(bp)) {
5596                 for (i = 0; i < 9; i++)
5597                         bnx2x_init_block(bp,
5598                                          cm_start[func][i], cm_end[func][i]);
5599
5600                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5601                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5602         }
5603
5604         /* HC init per function */
5605         if (CHIP_IS_E1H(bp)) {
5606                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5607
5608                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5609                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5610         }
5611         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5612
5613         if (CHIP_IS_E1H(bp))
5614                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5615
5616         /* Reset PCIE errors for debug */
5617         REG_WR(bp, 0x2114, 0xffffffff);
5618         REG_WR(bp, 0x2120, 0xffffffff);
5619
5620         return 0;
5621 }
5622
5623 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5624 {
5625         int i, rc = 0;
5626
5627         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5628            BP_FUNC(bp), load_code);
5629
5630         bp->dmae_ready = 0;
5631         mutex_init(&bp->dmae_mutex);
5632         bnx2x_gunzip_init(bp);
5633
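        /* note: the cases below fall through on purpose - a COMMON load
         * also runs the PORT and FUNCTION init stages, and a PORT load
         * also runs the FUNCTION stage (hence the "no break" markers) */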
5634         switch (load_code) {
5635         case FW_MSG_CODE_DRV_LOAD_COMMON:
5636                 rc = bnx2x_init_common(bp);
5637                 if (rc)
5638                         goto init_hw_err;
5639                 /* no break */
5640
5641         case FW_MSG_CODE_DRV_LOAD_PORT:
5642                 bp->dmae_ready = 1;
5643                 rc = bnx2x_init_port(bp);
5644                 if (rc)
5645                         goto init_hw_err;
5646                 /* no break */
5647
5648         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5649                 bp->dmae_ready = 1;
5650                 rc = bnx2x_init_func(bp);
5651                 if (rc)
5652                         goto init_hw_err;
5653                 break;
5654
5655         default:
5656                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5657                 break;
5658         }
5659
5660         if (!BP_NOMCP(bp)) {
5661                 int func = BP_FUNC(bp);
5662
5663                 bp->fw_drv_pulse_wr_seq =
5664                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5665                                  DRV_PULSE_SEQ_MASK);
5666                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5667                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5668                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5669         } else
5670                 bp->func_stx = 0;
5671
5672         /* this needs to be done before gunzip end */
5673         bnx2x_zero_def_sb(bp);
5674         for_each_queue(bp, i)
5675                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5676
5677 init_hw_err:
5678         bnx2x_gunzip_end(bp);
5679
5680         return rc;
5681 }
5682
5683 /* send the MCP a request, block until there is a reply */
5684 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5685 {
5686         int func = BP_FUNC(bp);
5687         u32 seq = ++bp->fw_seq;
5688         u32 rc = 0;
5689         u32 cnt = 1;
5690         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5691
5692         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5693         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5694
5695         do {
5696                 /* let the FW do its magic ... */
5697                 msleep(delay);
5698
5699                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5700
5701                 /* Give the FW up to 2 seconds (200*10ms) */
5702         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5703
5704         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5705            cnt*delay, rc, seq);
5706
5707         /* is this a reply to our command? */
5708         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5709                 rc &= FW_MSG_CODE_MASK;
5710
5711         } else {
5712                 /* FW BUG! */
5713                 BNX2X_ERR("FW failed to respond!\n");
5714                 bnx2x_fw_dump(bp);
5715                 rc = 0;
5716         }
5717
5718         return rc;
5719 }
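/* typical usage (see bnx2x_nic_load() below): post DRV_MSG_CODE_LOAD_REQ
 * and branch on the FW_MSG_CODE_DRV_LOAD_* reply; a return value of 0
 * means the MCP never echoed our sequence number and the command must
 * be treated as failed */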
5720
5721 static void bnx2x_free_mem(struct bnx2x *bp)
5722 {
5723
5724 #define BNX2X_PCI_FREE(x, y, size) \
5725         do { \
5726                 if (x) { \
5727                         pci_free_consistent(bp->pdev, size, x, y); \
5728                         x = NULL; \
5729                         y = 0; \
5730                 } \
5731         } while (0)
5732
5733 #define BNX2X_FREE(x) \
5734         do { \
5735                 if (x) { \
5736                         vfree(x); \
5737                         x = NULL; \
5738                 } \
5739         } while (0)
5740
5741         int i;
5742
5743         /* fastpath */
5744         for_each_queue(bp, i) {
5745
5746                 /* Status blocks */
5747                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5748                                bnx2x_fp(bp, i, status_blk_mapping),
5749                                sizeof(struct host_status_block) +
5750                                sizeof(struct eth_tx_db_data));
5751
5752                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5753                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5754                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5755                                bnx2x_fp(bp, i, tx_desc_mapping),
5756                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5757
5758                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5760                                bnx2x_fp(bp, i, rx_desc_mapping),
5761                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5762
5763                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5764                                bnx2x_fp(bp, i, rx_comp_mapping),
5765                                sizeof(struct eth_fast_path_rx_cqe) *
5766                                NUM_RCQ_BD);
5767
5768                 /* SGE ring */
5769                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5770                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5771                                bnx2x_fp(bp, i, rx_sge_mapping),
5772                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5773         }
5774         /* end of fastpath */
5775
5776         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5777                        sizeof(struct host_def_status_block));
5778
5779         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5780                        sizeof(struct bnx2x_slowpath));
5781
5782 #ifdef BCM_ISCSI
5783         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5784         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5785         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5786         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5787 #endif
5788         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5789
5790 #undef BNX2X_PCI_FREE
5791 #undef BNX2X_FREE
5792 }
5793
5794 static int bnx2x_alloc_mem(struct bnx2x *bp)
5795 {
5796
5797 #define BNX2X_PCI_ALLOC(x, y, size) \
5798         do { \
5799                 x = pci_alloc_consistent(bp->pdev, size, y); \
5800                 if (x == NULL) \
5801                         goto alloc_mem_err; \
5802                 memset(x, 0, size); \
5803         } while (0)
5804
5805 #define BNX2X_ALLOC(x, size) \
5806         do { \
5807                 x = vmalloc(size); \
5808                 if (x == NULL) \
5809                         goto alloc_mem_err; \
5810                 memset(x, 0, size); \
5811         } while (0)
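/* note the pairing: BNX2X_PCI_ALLOC returns DMA-consistent memory the
 * chip accesses directly (descriptor rings, status blocks), while
 * BNX2X_ALLOC is plain vmalloc() for host-only shadow arrays such as
 * the sw_tx_bd/sw_rx_bd rings; both zero the buffer */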
5812
5813         int i;
5814
5815         /* fastpath */
5816         for_each_queue(bp, i) {
5817                 bnx2x_fp(bp, i, bp) = bp;
5818
5819                 /* Status blocks */
5820                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5821                                 &bnx2x_fp(bp, i, status_blk_mapping),
5822                                 sizeof(struct host_status_block) +
5823                                 sizeof(struct eth_tx_db_data));
5824
5825                 bnx2x_fp(bp, i, hw_tx_prods) =
5826                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5827
5828                 bnx2x_fp(bp, i, tx_prods_mapping) =
5829                                 bnx2x_fp(bp, i, status_blk_mapping) +
5830                                 sizeof(struct host_status_block);
5831
5832                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5833                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5834                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5835                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5836                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5837                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5838
5839                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5840                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5841                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5842                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5843                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5844
5845                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5846                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5847                                 sizeof(struct eth_fast_path_rx_cqe) *
5848                                 NUM_RCQ_BD);
5849
5850                 /* SGE ring */
5851                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5852                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5853                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5854                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5855                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5856         }
5857         /* end of fastpath */
5858
5859         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5860                         sizeof(struct host_def_status_block));
5861
5862         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5863                         sizeof(struct bnx2x_slowpath));
5864
5865 #ifdef BCM_ISCSI
5866         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5867
5868         /* Initialize T1 */
5869         for (i = 0; i < 64*1024; i += 64) {
5870                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5871                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5872         }
5873
5874         /* allocate the searcher T2 table
5875            we allocate 1/4 of the T1 allocation for T2
5876            (which is not entered into the ILT) */
5877         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5878
5879         /* Initialize T2 */
5880         for (i = 0; i < 16*1024; i += 64)
5881                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5882
5883         /* now fixup the last line in the block to point to the next block */
5884         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5885
5886         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5887         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5888
5889         /* QM queues (128*MAX_CONN) */
5890         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5891 #endif
5892
5893         /* Slow path ring */
5894         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5895
5896         return 0;
5897
5898 alloc_mem_err:
5899         bnx2x_free_mem(bp);
5900         return -ENOMEM;
5901
5902 #undef BNX2X_PCI_ALLOC
5903 #undef BNX2X_ALLOC
5904 }
5905
5906 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5907 {
5908         int i;
5909
5910         for_each_queue(bp, i) {
5911                 struct bnx2x_fastpath *fp = &bp->fp[i];
5912
5913                 u16 bd_cons = fp->tx_bd_cons;
5914                 u16 sw_prod = fp->tx_pkt_prod;
5915                 u16 sw_cons = fp->tx_pkt_cons;
5916
5917                 while (sw_cons != sw_prod) {
5918                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5919                         sw_cons++;
5920                 }
5921         }
5922 }
5923
5924 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5925 {
5926         int i, j;
5927
5928         for_each_queue(bp, j) {
5929                 struct bnx2x_fastpath *fp = &bp->fp[j];
5930
5931                 for (i = 0; i < NUM_RX_BD; i++) {
5932                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5933                         struct sk_buff *skb = rx_buf->skb;
5934
5935                         if (skb == NULL)
5936                                 continue;
5937
5938                         pci_unmap_single(bp->pdev,
5939                                          pci_unmap_addr(rx_buf, mapping),
5940                                          bp->rx_buf_use_size,
5941                                          PCI_DMA_FROMDEVICE);
5942
5943                         rx_buf->skb = NULL;
5944                         dev_kfree_skb(skb);
5945                 }
5946                 if (!fp->disable_tpa)
5947                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5948                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5949                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5950         }
5951 }
5952
5953 static void bnx2x_free_skbs(struct bnx2x *bp)
5954 {
5955         bnx2x_free_tx_skbs(bp);
5956         bnx2x_free_rx_skbs(bp);
5957 }
5958
5959 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5960 {
5961         int i, offset = 1;
5962
5963         free_irq(bp->msix_table[0].vector, bp->dev);
5964         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5965            bp->msix_table[0].vector);
5966
5967         for_each_queue(bp, i) {
5968                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5969                    "state %x\n", i, bp->msix_table[i + offset].vector,
5970                    bnx2x_fp(bp, i, state));
5971
5972                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5973                         BNX2X_ERR("IRQ of fp #%d being freed while "
5974                                   "state != closed\n", i);
5975
5976                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5977         }
5978 }
5979
5980 static void bnx2x_free_irq(struct bnx2x *bp)
5981 {
5982         if (bp->flags & USING_MSIX_FLAG) {
5983                 bnx2x_free_msix_irqs(bp);
5984                 pci_disable_msix(bp->pdev);
5985                 bp->flags &= ~USING_MSIX_FLAG;
5986
5987         } else
5988                 free_irq(bp->pdev->irq, bp->dev);
5989 }
5990
5991 static int bnx2x_enable_msix(struct bnx2x *bp)
5992 {
5993         int i, rc, offset;
5994
5995         bp->msix_table[0].entry = 0;
5996         offset = 1;
5997         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5998
5999         for_each_queue(bp, i) {
6000                 int igu_vec = offset + i + BP_L_ID(bp);
6001
6002                 bp->msix_table[i + offset].entry = igu_vec;
6003                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6004                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6005         }
6006
6007         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6008                              bp->num_queues + offset);
6009         if (rc) {
6010                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6011                 return -1;
6012         }
6013         bp->flags |= USING_MSIX_FLAG;
6014
6015         return 0;
6016 }
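/* resulting vector layout: entry 0 services the slowpath (default
 * status block), entries 1..num_queues map 1:1 onto the fastpath
 * rings; the IGU vector numbers are offset by BP_L_ID() so the
 * functions sharing the chip do not collide */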
6017
6018 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6019 {
6020         int i, rc, offset = 1;
6021
6022         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6023                          bp->dev->name, bp->dev);
6024         if (rc) {
6025                 BNX2X_ERR("request sp irq failed\n");
6026                 return -EBUSY;
6027         }
6028
6029         for_each_queue(bp, i) {
6030                 rc = request_irq(bp->msix_table[i + offset].vector,
6031                                  bnx2x_msix_fp_int, 0,
6032                                  bp->dev->name, &bp->fp[i]);
6033                 if (rc) {
6034                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6035                                   i + offset, -rc);
6036                         bnx2x_free_msix_irqs(bp);
6037                         return -EBUSY;
6038                 }
6039
6040                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6041         }
6042
6043         return 0;
6044 }
6045
6046 static int bnx2x_req_irq(struct bnx2x *bp)
6047 {
6048         int rc;
6049
6050         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6051                          bp->dev->name, bp->dev);
6052         if (!rc)
6053                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6054
6055         return rc;
6056 }
6057
6058 /*
6059  * Init service functions
6060  */
6061
6062 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6063 {
6064         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6065         int port = BP_PORT(bp);
6066
6067         /* CAM allocation
6068          * unicasts 0-31:port0 32-63:port1
6069          * multicast 64-127:port0 128-191:port1
6070          */
6071         config->hdr.length_6b = 2;
6072         config->hdr.offset = port ? 31 : 0;
6073         config->hdr.client_id = BP_CL_ID(bp);
6074         config->hdr.reserved1 = 0;
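        /* length_6b = 2: this single ramrod writes two CAM entries -
         * the primary MAC below and the broadcast entry at index 1 */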
6075
6076         /* primary MAC */
6077         config->config_table[0].cam_entry.msb_mac_addr =
6078                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6079         config->config_table[0].cam_entry.middle_mac_addr =
6080                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6081         config->config_table[0].cam_entry.lsb_mac_addr =
6082                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
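        /* e.g. on a little-endian host a (hypothetical) MAC of
         * 00:1b:21:aa:bb:cc yields msb 0x001b, middle 0x21aa and
         * lsb 0xbbcc - swab16() undoes the LE load of each
         * big-endian address pair */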
6083         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6084         if (set)
6085                 config->config_table[0].target_table_entry.flags = 0;
6086         else
6087                 CAM_INVALIDATE(config->config_table[0]);
6088         config->config_table[0].target_table_entry.client_id = 0;
6089         config->config_table[0].target_table_entry.vlan_id = 0;
6090
6091         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6092            (set ? "setting" : "clearing"),
6093            config->config_table[0].cam_entry.msb_mac_addr,
6094            config->config_table[0].cam_entry.middle_mac_addr,
6095            config->config_table[0].cam_entry.lsb_mac_addr);
6096
6097         /* broadcast */
6098         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6099         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6100         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6101         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6102         if (set)
6103                 config->config_table[1].target_table_entry.flags =
6104                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6105         else
6106                 CAM_INVALIDATE(config->config_table[1]);
6107         config->config_table[1].target_table_entry.client_id = 0;
6108         config->config_table[1].target_table_entry.vlan_id = 0;
6109
6110         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6111                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6112                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6113 }
6114
6115 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6116 {
6117         struct mac_configuration_cmd_e1h *config =
6118                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6119
6120         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6121                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6122                 return;
6123         }
6124
6125         /* CAM allocation for E1H
6126          * unicasts: by func number
6127          * multicast: 20+FUNC*20, 20 each
6128          */
6129         config->hdr.length_6b = 1;
6130         config->hdr.offset = BP_FUNC(bp);
6131         config->hdr.client_id = BP_CL_ID(bp);
6132         config->hdr.reserved1 = 0;
6133
6134         /* primary MAC */
6135         config->config_table[0].msb_mac_addr =
6136                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6137         config->config_table[0].middle_mac_addr =
6138                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6139         config->config_table[0].lsb_mac_addr =
6140                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6141         config->config_table[0].client_id = BP_L_ID(bp);
6142         config->config_table[0].vlan_id = 0;
6143         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6144         if (set)
6145                 config->config_table[0].flags = BP_PORT(bp);
6146         else
6147                 config->config_table[0].flags =
6148                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6149
6150         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6151            (set ? "setting" : "clearing"),
6152            config->config_table[0].msb_mac_addr,
6153            config->config_table[0].middle_mac_addr,
6154            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6155
6156         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6157                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6158                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6159 }
6160
6161 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6162                              int *state_p, int poll)
6163 {
6164         /* can take a while if any port is running */
6165         int cnt = 500;
6166
6167         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6168            poll ? "polling" : "waiting", state, idx);
6169
6170         might_sleep();
6171         while (cnt--) {
6172                 if (poll) {
6173                         bnx2x_rx_int(bp->fp, 10);
6174                         /* if index is different from 0
6175                          * the reply for some commands will
6176                          * be on the non default queue
6177                          */
6178                         if (idx)
6179                                 bnx2x_rx_int(&bp->fp[idx], 10);
6180                 }
6181
6182                 mb(); /* state is changed by bnx2x_sp_event() */
6183                 if (*state_p == state)
6184                         return 0;
6185
6186                 msleep(1);
6187         }
6188
6189         /* timeout! */
6190         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6191                   poll ? "polling" : "waiting", state, idx);
6192 #ifdef BNX2X_STOP_ON_ERROR
6193         bnx2x_panic();
6194 #endif
6195
6196         return -EBUSY;
6197 }
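/* in short: returns 0 once *state_p (updated by bnx2x_sp_event())
 * reaches the requested state, and -EBUSY after ~500 x 1ms attempts;
 * in poll mode the Rx ring is serviced manually since interrupts
 * may not be running yet */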
6198
6199 static int bnx2x_setup_leading(struct bnx2x *bp)
6200 {
6201         int rc;
6202
6203         /* reset IGU state */
6204         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6205
6206         /* SETUP ramrod */
6207         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6208
6209         /* Wait for completion */
6210         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6211
6212         return rc;
6213 }
6214
6215 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6216 {
6217         /* reset IGU state */
6218         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6219
6220         /* SETUP ramrod */
6221         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6222         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6223
6224         /* Wait for completion */
6225         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6226                                  &(bp->fp[index].state), 0);
6227 }
6228
6229 static int bnx2x_poll(struct napi_struct *napi, int budget);
6230 static void bnx2x_set_rx_mode(struct net_device *dev);
6231
6232 /* must be called with rtnl_lock */
6233 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6234 {
6235         u32 load_code;
6236         int i, rc;
6237 #ifdef BNX2X_STOP_ON_ERROR
6238         if (unlikely(bp->panic))
6239                 return -EPERM;
6240 #endif
6241
6242         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6243
6244         /* Send the LOAD_REQUEST command to the MCP.
6245            The reply tells us which type of LOAD to perform:
6246            if this is the first port to be initialized,
6247            the common blocks must be initialized as well; otherwise not
6248         */
6249         if (!BP_NOMCP(bp)) {
6250                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6251                 if (!load_code) {
6252                         BNX2X_ERR("MCP response failure, aborting\n");
6253                         return -EBUSY;
6254                 }
6255                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6256                         return -EBUSY; /* other port in diagnostic mode */
6257
6258         } else {
6259                 int port = BP_PORT(bp);
6260
6261                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6262                    load_count[0], load_count[1], load_count[2]);
6263                 load_count[0]++;
6264                 load_count[1 + port]++;
6265                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6266                    load_count[0], load_count[1], load_count[2]);
6267                 if (load_count[0] == 1)
6268                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6269                 else if (load_count[1 + port] == 1)
6270                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6271                 else
6272                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6273         }
6274
6275         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6276             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6277                 bp->port.pmf = 1;
6278         else
6279                 bp->port.pmf = 0;
6280         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
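        /* the function that performs the COMMON or PORT init becomes the
         * port management function (PMF): it owns PHY init (see
         * bnx2x_initial_phy_init() below), while non-PMF functions only
         * pick up the link status */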
6281
6282         /* if we can't use MSI-X we only need one fp,
6283          * so try to enable MSI-X with the requested number of fp's
6284          * and fall back to INT#A with one fp
6285          */
6286         if (use_inta) {
6287                 bp->num_queues = 1;
6288
6289         } else {
6290                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6291                         /* user requested number */
6292                         bp->num_queues = use_multi;
6293
6294                 else if (use_multi)
6295                         bp->num_queues = min_t(u32, num_online_cpus(),
6296                                                BP_MAX_QUEUES(bp));
6297                 else
6298                         bp->num_queues = 1;
6299
6300                 if (bnx2x_enable_msix(bp)) {
6301                         /* failed to enable MSI-X */
6302                         bp->num_queues = 1;
6303                         if (use_multi)
6304                                 BNX2X_ERR("Multi requested but failed"
6305                                           " to enable MSI-X\n");
6306                 }
6307         }
6308         DP(NETIF_MSG_IFUP,
6309            "set number of queues to %d\n", bp->num_queues);
6310
6311         if (bnx2x_alloc_mem(bp))
6312                 return -ENOMEM;
6313
6314         for_each_queue(bp, i)
6315                 bnx2x_fp(bp, i, disable_tpa) =
6316                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6317
6318         if (bp->flags & USING_MSIX_FLAG) {
6319                 rc = bnx2x_req_msix_irqs(bp);
6320                 if (rc) {
6321                         pci_disable_msix(bp->pdev);
6322                         goto load_error;
6323                 }
6324         } else {
6325                 bnx2x_ack_int(bp);
6326                 rc = bnx2x_req_irq(bp);
6327                 if (rc) {
6328                         BNX2X_ERR("IRQ request failed, aborting\n");
6329                         goto load_error;
6330                 }
6331         }
6332
6333         for_each_queue(bp, i)
6334                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6335                                bnx2x_poll, 128);
6336
6337         /* Initialize HW */
6338         rc = bnx2x_init_hw(bp, load_code);
6339         if (rc) {
6340                 BNX2X_ERR("HW init failed, aborting\n");
6341                 goto load_error;
6342         }
6343
6344         /* Setup NIC internals and enable interrupts */
6345         bnx2x_nic_init(bp, load_code);
6346
6347         /* Send LOAD_DONE command to MCP */
6348         if (!BP_NOMCP(bp)) {
6349                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6350                 if (!load_code) {
6351                         BNX2X_ERR("MCP response failure, aborting\n");
6352                         rc = -EBUSY;
6353                         goto load_int_disable;
6354                 }
6355         }
6356
6357         bnx2x_stats_init(bp);
6358
6359         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6360
6361         /* Enable Rx interrupt handling before sending the ramrod
6362            as it's completed on Rx FP queue */
6363         for_each_queue(bp, i)
6364                 napi_enable(&bnx2x_fp(bp, i, napi));
6365
6366         /* Enable interrupt handling */
6367         atomic_set(&bp->intr_sem, 0);
6368
6369         rc = bnx2x_setup_leading(bp);
6370         if (rc) {
6371                 BNX2X_ERR("Setup leading failed!\n");
6372                 goto load_stop_netif;
6373         }
6374
6375         if (CHIP_IS_E1H(bp))
6376                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6377                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6378                         bp->state = BNX2X_STATE_DISABLED;
6379                 }
6380
6381         if (bp->state == BNX2X_STATE_OPEN)
6382                 for_each_nondefault_queue(bp, i) {
6383                         rc = bnx2x_setup_multi(bp, i);
6384                         if (rc)
6385                                 goto load_stop_netif;
6386                 }
6387
6388         if (CHIP_IS_E1(bp))
6389                 bnx2x_set_mac_addr_e1(bp, 1);
6390         else
6391                 bnx2x_set_mac_addr_e1h(bp, 1);
6392
6393         if (bp->port.pmf)
6394                 bnx2x_initial_phy_init(bp);
6395
6396         /* Start fast path */
6397         switch (load_mode) {
6398         case LOAD_NORMAL:
6399                 /* Tx queue should only be re-enabled */
6400                 netif_wake_queue(bp->dev);
6401                 bnx2x_set_rx_mode(bp->dev);
6402                 break;
6403
6404         case LOAD_OPEN:
6405                 netif_start_queue(bp->dev);
6406                 bnx2x_set_rx_mode(bp->dev);
6407                 if (bp->flags & USING_MSIX_FLAG)
6408                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6409                                bp->dev->name);
6410                 break;
6411
6412         case LOAD_DIAG:
6413                 bnx2x_set_rx_mode(bp->dev);
6414                 bp->state = BNX2X_STATE_DIAG;
6415                 break;
6416
6417         default:
6418                 break;
6419         }
6420
6421         if (!bp->port.pmf)
6422                 bnx2x__link_status_update(bp);
6423
6424         /* start the timer */
6425         mod_timer(&bp->timer, jiffies + bp->current_interval);
6426
6427
6428         return 0;
6429
6430 load_stop_netif:
6431         for_each_queue(bp, i)
6432                 napi_disable(&bnx2x_fp(bp, i, napi));
6433
6434 load_int_disable:
6435         bnx2x_int_disable_sync(bp);
6436
6437         /* Release IRQs */
6438         bnx2x_free_irq(bp);
6439
6440         /* Free SKBs, SGEs, TPA pool and driver internals */
6441         bnx2x_free_skbs(bp);
6442         for_each_queue(bp, i)
6443                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6444 load_error:
6445         bnx2x_free_mem(bp);
6446
6447         /* TBD we really need to reset the chip
6448            if we want to recover from this */
6449         return rc;
6450 }
6451
6452 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6453 {
6454         int rc;
6455
6456         /* halt the connection */
6457         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6458         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6459
6460         /* Wait for completion */
6461         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6462                                &(bp->fp[index].state), 1);
6463         if (rc) /* timeout */
6464                 return rc;
6465
6466         /* delete cfc entry */
6467         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6468
6469         /* Wait for completion */
6470         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6471                                &(bp->fp[index].state), 1);
6472         return rc;
6473 }
6474
6475 static int bnx2x_stop_leading(struct bnx2x *bp)
6476 {
6477         u16 dsb_sp_prod_idx;
6478         /* if the other port is handling traffic,
6479            this can take a lot of time */
6480         int cnt = 500;
6481         int rc;
6482
6483         might_sleep();
6484
6485         /* Send HALT ramrod */
6486         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6487         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6488
6489         /* Wait for completion */
6490         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6491                                &(bp->fp[0].state), 1);
6492         if (rc) /* timeout */
6493                 return rc;
6494
6495         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6496
6497         /* Send PORT_DELETE ramrod */
6498         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6499
6500         /* Wait for the completion to arrive on the default status block;
6501            we are going to reset the chip anyway,
6502            so there is not much to do if this times out
6503          */
6504         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6505                 if (!cnt) {
6506                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6507                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6508                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6509 #ifdef BNX2X_STOP_ON_ERROR
6510                         bnx2x_panic();
6511 #else
6512                         rc = -EBUSY;
6513 #endif
6514                         break;
6515                 }
6516                 cnt--;
6517                 msleep(1);
6518         }
6519         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6520         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6521
6522         return rc;
6523 }
6524
6525 static void bnx2x_reset_func(struct bnx2x *bp)
6526 {
6527         int port = BP_PORT(bp);
6528         int func = BP_FUNC(bp);
6529         int base, i;
6530
6531         /* Configure IGU */
6532         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6533         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6534
6535         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6536
6537         /* Clear ILT */
6538         base = FUNC_ILT_BASE(func);
6539         for (i = base; i < base + ILT_PER_FUNC; i++)
6540                 bnx2x_ilt_wr(bp, i, 0);
6541 }
6542
6543 static void bnx2x_reset_port(struct bnx2x *bp)
6544 {
6545         int port = BP_PORT(bp);
6546         u32 val;
6547
6548         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6549
6550         /* Do not receive packets into the BRB */
6551         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6552         /* Do not direct receive packets that are not for the MCP to the BRB */
6553         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6554                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6555
6556         /* Configure AEU */
6557         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6558
6559         msleep(100);
6560         /* Check for BRB port occupancy */
6561         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6562         if (val)
6563                 DP(NETIF_MSG_IFDOWN,
6564                    "BRB1 is not empty  %d blocks are occupied\n", val);
6565
6566         /* TODO: Close Doorbell port? */
6567 }
6568
6569 static void bnx2x_reset_common(struct bnx2x *bp)
6570 {
6571         /* reset_common */
6572         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6573                0xd3ffff7f);
6574         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6575 }
6576
6577 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6578 {
6579         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6580            BP_FUNC(bp), reset_code);
6581
6582         switch (reset_code) {
6583         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6584                 bnx2x_reset_port(bp);
6585                 bnx2x_reset_func(bp);
6586                 bnx2x_reset_common(bp);
6587                 break;
6588
6589         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6590                 bnx2x_reset_port(bp);
6591                 bnx2x_reset_func(bp);
6592                 break;
6593
6594         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6595                 bnx2x_reset_func(bp);
6596                 break;
6597
6598         default:
6599                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6600                 break;
6601         }
6602 }
6603
6604 /* must be called with rtnl_lock */
6605 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6606 {
6607         int port = BP_PORT(bp);
6608         u32 reset_code = 0;
6609         int i, cnt, rc;
6610
6611         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6612
6613         bp->rx_mode = BNX2X_RX_MODE_NONE;
6614         bnx2x_set_storm_rx_mode(bp);
6615
6616         if (netif_running(bp->dev)) {
6617                 netif_tx_disable(bp->dev);
6618                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6619         }
6620
6621         del_timer_sync(&bp->timer);
6622         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6623                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6624         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6625
6626         /* Wait until tx fast path tasks complete */
6627         for_each_queue(bp, i) {
6628                 struct bnx2x_fastpath *fp = &bp->fp[i];
6629
6630                 cnt = 1000;
6631                 smp_rmb();
6632                 while (BNX2X_HAS_TX_WORK(fp)) {
6633
6634                         if (!netif_running(bp->dev))
6635                                 bnx2x_tx_int(fp, 1000);
6636
6637                         if (!cnt) {
6638                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6639                                           i);
6640 #ifdef BNX2X_STOP_ON_ERROR
6641                                 bnx2x_panic();
6642                                 return -EBUSY;
6643 #else
6644                                 break;
6645 #endif
6646                         }
6647                         cnt--;
6648                         msleep(1);
6649                         smp_rmb();
6650                 }
6651         }
6652
6653         /* Give HW time to discard old tx messages */
6654         msleep(1);
6655
6656         for_each_queue(bp, i)
6657                 napi_disable(&bnx2x_fp(bp, i, napi));
6658         /* Disable interrupts after Tx and Rx are disabled on stack level */
6659         bnx2x_int_disable_sync(bp);
6660
6661         /* Release IRQs */
6662         bnx2x_free_irq(bp);
6663
6664         if (unload_mode == UNLOAD_NORMAL)
6665                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6666
6667         else if (bp->flags & NO_WOL_FLAG) {
6668                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6669                 if (CHIP_IS_E1H(bp))
6670                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6671
6672         } else if (bp->wol) {
6673                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6674                 u8 *mac_addr = bp->dev->dev_addr;
6675                 u32 val;
6676                 /* The mac address is written to entries 1-4 to
6677                    preserve entry 0 which is used by the PMF */
6678                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6679
6680                 val = (mac_addr[0] << 8) | mac_addr[1];
6681                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6682
6683                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6684                       (mac_addr[4] << 8) | mac_addr[5];
6685                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
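                /* e.g. E1HVN 0 uses match entry 1, i.e. byte offset 8:
                 * the two writes above spread the 6-byte MAC across the
                 * entry's high (2 bytes) and low (4 bytes) words */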
6686
6687                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6688
6689         } else
6690                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6691
6692         if (CHIP_IS_E1(bp)) {
6693                 struct mac_configuration_cmd *config =
6694                                                 bnx2x_sp(bp, mcast_config);
6695
6696                 bnx2x_set_mac_addr_e1(bp, 0);
6697
6698                 for (i = 0; i < config->hdr.length_6b; i++)
6699                         CAM_INVALIDATE(config->config_table[i]);
6700
6701                 config->hdr.length_6b = i;
6702                 if (CHIP_REV_IS_SLOW(bp))
6703                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6704                 else
6705                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6706                 config->hdr.client_id = BP_CL_ID(bp);
6707                 config->hdr.reserved1 = 0;
6708
6709                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6710                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6711                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6712
6713         } else { /* E1H */
6714                 bnx2x_set_mac_addr_e1h(bp, 0);
6715
6716                 for (i = 0; i < MC_HASH_SIZE; i++)
6717                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6718         }
6719
6720         if (CHIP_IS_E1H(bp))
6721                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6722
6723         /* Close multi and leading connections;
6724            completions for the ramrods are collected synchronously */
6725         for_each_nondefault_queue(bp, i)
6726                 if (bnx2x_stop_multi(bp, i))
6727                         goto unload_error;
6728
6729         rc = bnx2x_stop_leading(bp);
6730         if (rc) {
6731                 BNX2X_ERR("Stop leading failed!\n");
6732 #ifdef BNX2X_STOP_ON_ERROR
6733                 return -EBUSY;
6734 #else
6735                 goto unload_error;
6736 #endif
6737         }
6738
6739 unload_error:
6740         if (!BP_NOMCP(bp))
6741                 reset_code = bnx2x_fw_command(bp, reset_code);
6742         else {
6743                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6744                    load_count[0], load_count[1], load_count[2]);
6745                 load_count[0]--;
6746                 load_count[1 + port]--;
6747                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6748                    load_count[0], load_count[1], load_count[2]);
6749                 if (load_count[0] == 0)
6750                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6751                 else if (load_count[1 + port] == 0)
6752                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6753                 else
6754                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6755         }
6756
6757         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6758             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6759                 bnx2x__link_reset(bp);
6760
6761         /* Reset the chip */
6762         bnx2x_reset_chip(bp, reset_code);
6763
6764         /* Report UNLOAD_DONE to MCP */
6765         if (!BP_NOMCP(bp))
6766                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6767
6768         /* Free SKBs, SGEs, TPA pool and driver internals */
6769         bnx2x_free_skbs(bp);
6770         for_each_queue(bp, i)
6771                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6772         bnx2x_free_mem(bp);
6773
6774         bp->state = BNX2X_STATE_CLOSED;
6775
6776         netif_carrier_off(bp->dev);
6777
6778         return 0;
6779 }
6780
6781 static void bnx2x_reset_task(struct work_struct *work)
6782 {
6783         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6784
6785 #ifdef BNX2X_STOP_ON_ERROR
6786         BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
6787                   " so the reset is skipped to allow a debug dump;\n"
6788          KERN_ERR " you will need to reboot when done\n");
6789         return;
6790 #endif
6791
6792         rtnl_lock();
6793
6794         if (!netif_running(bp->dev))
6795                 goto reset_task_exit;
6796
6797         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6798         bnx2x_nic_load(bp, LOAD_NORMAL);
6799
6800 reset_task_exit:
6801         rtnl_unlock();
6802 }
6803
6804 /* end of nic load/unload */
6805
6806 /* ethtool_ops */
6807
6808 /*
6809  * Init service functions
6810  */
6811
6812 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6813 {
6814         u32 val;
6815
6816         /* Check if there is any driver already loaded */
6817         val = REG_RD(bp, MISC_REG_UNPREPARED);
6818         if (val == 0x1) {
6819                 /* Check if it is the UNDI driver:
6820                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6821                  */
6822                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6823                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6824                 if (val == 0x7) {
6825                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6826                         /* save our func */
6827                         int func = BP_FUNC(bp);
6828                         u32 swap_en;
6829                         u32 swap_val;
6830
6831                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6832
6833                         /* try to unload UNDI on port 0 */
6834                         bp->func = 0;
6835                         bp->fw_seq =
6836                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6837                                 DRV_MSG_SEQ_NUMBER_MASK);
6838                         reset_code = bnx2x_fw_command(bp, reset_code);
6839
6840                         /* if UNDI is loaded on the other port */
6841                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6842
6843                                 /* send "DONE" for previous unload */
6844                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6845
6846                                 /* unload UNDI on port 1 */
6847                                 bp->func = 1;
6848                                 bp->fw_seq =
6849                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6850                                         DRV_MSG_SEQ_NUMBER_MASK);
6851                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6852
6853                                 bnx2x_fw_command(bp, reset_code);
6854                         }
6855
6856                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6857                                     HC_REG_CONFIG_0), 0x1000);
6858
6859                         /* close input traffic and wait for it to drain */
6860                         /* Do not receive packets into the BRB */
6861                         REG_WR(bp,
6862                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6863                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6864                         /* Do not direct receive packets that are not
6865                          * destined for the MCP to the BRB */
6866                         REG_WR(bp,
6867                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6868                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6869                         /* clear AEU */
6870                         REG_WR(bp,
6871                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6872                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6873                         msleep(10);
6874
6875                         /* save NIG port swap info */
6876                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6877                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6878                         /* reset device */
6879                         REG_WR(bp,
6880                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6881                                0xd3ffffff);
6882                         REG_WR(bp,
6883                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6884                                0x1403);
6885                         /* take the NIG out of reset and restore swap values */
6886                         REG_WR(bp,
6887                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6888                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6889                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6890                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6891
6892                         /* send unload done to the MCP */
6893                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6894
6895                         /* restore our func and fw_seq */
6896                         bp->func = func;
6897                         bp->fw_seq =
6898                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6899                                 DRV_MSG_SEQ_NUMBER_MASK);
6900                 }
6901                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6902         }
6903 }
6904
6905 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6906 {
6907         u32 val, val2, val3, val4, id;
6908         u16 pmc;
6909
6910         /* Get the chip revision id and number. */
6911         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6912         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6913         id = ((val & 0xffff) << 16);
6914         val = REG_RD(bp, MISC_REG_CHIP_REV);
6915         id |= ((val & 0xf) << 12);
6916         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6917         id |= ((val & 0xff) << 4);
6918         val = REG_RD(bp, MISC_REG_BOND_ID);
6919         id |= (val & 0xf);
6920         bp->common.chip_id = id;
6921         bp->link_params.chip_id = bp->common.chip_id;
6922         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
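             /* worked example with hypothetical register values:
              * chip_num 0x164e, rev 0, metal 0, bond_id 0
              * assemble to chip_id 0x164e0000 */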
6923
6924         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6925         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6926                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6927         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6928                        bp->common.flash_size, bp->common.flash_size);
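             /* the flash size is a left shift by the CFG4 strap field,
              * so it doubles with each increment of that field */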
6929
6930         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6931         bp->link_params.shmem_base = bp->common.shmem_base;
6932         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6933
6934         if (!bp->common.shmem_base ||
6935             (bp->common.shmem_base < 0xA0000) ||
6936             (bp->common.shmem_base >= 0xC0000)) {
6937                 BNX2X_DEV_INFO("MCP not active\n");
6938                 bp->flags |= NO_MCP_FLAG;
6939                 return;
6940         }
6941
6942         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6943         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6944                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6945                 BNX2X_ERR("BAD MCP validity signature\n");
6946
6947         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6948         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6949
6950         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6951                        bp->common.hw_config, bp->common.board);
6952
6953         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6954                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6955                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6956
6957         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6958         bp->common.bc_ver = val;
6959         BNX2X_DEV_INFO("bc_ver %X\n", val);
6960         if (val < BNX2X_BC_VER) {
6961                 /* for now, only warn;
6962                  * later we might need to enforce this */
6963                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6964                           " please upgrade BC\n", BNX2X_BC_VER, val);
6965         }
6966
6967         if (BP_E1HVN(bp) == 0) {
6968                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6969                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6970         } else {
6971                 /* no WOL capability for E1HVN != 0 */
6972                 bp->flags |= NO_WOL_FLAG;
6973         }
6974         BNX2X_DEV_INFO("%sWoL capable\n",
6975                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6976
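             /* the part number is read as four consecutive dwords at
              * byte offsets 0, 4, 8 and 12 of the part_num field */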
6977         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6978         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6979         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6980         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6981
6982         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6983                val, val2, val3, val4);
6984 }
6985
6986 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6987                                                     u32 switch_cfg)
6988 {
6989         int port = BP_PORT(bp);
6990         u32 ext_phy_type;
6991
6992         switch (switch_cfg) {
6993         case SWITCH_CFG_1G:
6994                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6995
6996                 ext_phy_type =
6997                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6998                 switch (ext_phy_type) {
6999                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7000                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7001                                        ext_phy_type);
7002
7003                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7004                                                SUPPORTED_10baseT_Full |
7005                                                SUPPORTED_100baseT_Half |
7006                                                SUPPORTED_100baseT_Full |
7007                                                SUPPORTED_1000baseT_Full |
7008                                                SUPPORTED_2500baseX_Full |
7009                                                SUPPORTED_TP |
7010                                                SUPPORTED_FIBRE |
7011                                                SUPPORTED_Autoneg |
7012                                                SUPPORTED_Pause |
7013                                                SUPPORTED_Asym_Pause);
7014                         break;
7015
7016                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7017                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7018                                        ext_phy_type);
7019
7020                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7021                                                SUPPORTED_10baseT_Full |
7022                                                SUPPORTED_100baseT_Half |
7023                                                SUPPORTED_100baseT_Full |
7024                                                SUPPORTED_1000baseT_Full |
7025                                                SUPPORTED_TP |
7026                                                SUPPORTED_FIBRE |
7027                                                SUPPORTED_Autoneg |
7028                                                SUPPORTED_Pause |
7029                                                SUPPORTED_Asym_Pause);
7030                         break;
7031
7032                 default:
7033                         BNX2X_ERR("NVRAM config error. "
7034                                   "BAD SerDes ext_phy_config 0x%x\n",
7035                                   bp->link_params.ext_phy_config);
7036                         return;
7037                 }
7038
7039                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7040                                            port*0x10);
7041                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7042                 break;
7043
7044         case SWITCH_CFG_10G:
7045                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7046
7047                 ext_phy_type =
7048                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7049                 switch (ext_phy_type) {
7050                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7051                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7052                                        ext_phy_type);
7053
7054                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7055                                                SUPPORTED_10baseT_Full |
7056                                                SUPPORTED_100baseT_Half |
7057                                                SUPPORTED_100baseT_Full |
7058                                                SUPPORTED_1000baseT_Full |
7059                                                SUPPORTED_2500baseX_Full |
7060                                                SUPPORTED_10000baseT_Full |
7061                                                SUPPORTED_TP |
7062                                                SUPPORTED_FIBRE |
7063                                                SUPPORTED_Autoneg |
7064                                                SUPPORTED_Pause |
7065                                                SUPPORTED_Asym_Pause);
7066                         break;
7067
7068                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7069                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7070                                        ext_phy_type);
7071
7072                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7073                                                SUPPORTED_FIBRE |
7074                                                SUPPORTED_Pause |
7075                                                SUPPORTED_Asym_Pause);
7076                         break;
7077
7078                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7079                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7080                                        ext_phy_type);
7081
7082                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7083                                                SUPPORTED_1000baseT_Full |
7084                                                SUPPORTED_FIBRE |
7085                                                SUPPORTED_Pause |
7086                                                SUPPORTED_Asym_Pause);
7087                         break;
7088
7089                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7090                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7091                                        ext_phy_type);
7092
7093                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7094                                                SUPPORTED_1000baseT_Full |
7095                                                SUPPORTED_FIBRE |
7096                                                SUPPORTED_Autoneg |
7097                                                SUPPORTED_Pause |
7098                                                SUPPORTED_Asym_Pause);
7099                         break;
7100
7101                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7102                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7103                                        ext_phy_type);
7104
7105                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7106                                                SUPPORTED_2500baseX_Full |
7107                                                SUPPORTED_1000baseT_Full |
7108                                                SUPPORTED_FIBRE |
7109                                                SUPPORTED_Autoneg |
7110                                                SUPPORTED_Pause |
7111                                                SUPPORTED_Asym_Pause);
7112                         break;
7113
7114                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7115                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7116                                        ext_phy_type);
7117
7118                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7119                                                SUPPORTED_TP |
7120                                                SUPPORTED_Autoneg |
7121                                                SUPPORTED_Pause |
7122                                                SUPPORTED_Asym_Pause);
7123                         break;
7124
7125                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7126                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7127                                   bp->link_params.ext_phy_config);
7128                         break;
7129
7130                 default:
7131                         BNX2X_ERR("NVRAM config error. "
7132                                   "BAD XGXS ext_phy_config 0x%x\n",
7133                                   bp->link_params.ext_phy_config);
7134                         return;
7135                 }
7136
7137                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7138                                            port*0x18);
7139                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7140
7141                 break;
7142
7143         default:
7144                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7145                           bp->port.link_config);
7146                 return;
7147         }
7148         bp->link_params.phy_addr = bp->port.phy_addr;
7149
7150         /* mask what we support according to speed_cap_mask */
7151         if (!(bp->link_params.speed_cap_mask &
7152                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7153                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7154
7155         if (!(bp->link_params.speed_cap_mask &
7156                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7157                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7158
7159         if (!(bp->link_params.speed_cap_mask &
7160                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7161                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7162
7163         if (!(bp->link_params.speed_cap_mask &
7164                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7165                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7166
7167         if (!(bp->link_params.speed_cap_mask &
7168                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7169                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7170                                         SUPPORTED_1000baseT_Full);
7171
7172         if (!(bp->link_params.speed_cap_mask &
7173                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7174                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7175
7176         if (!(bp->link_params.speed_cap_mask &
7177                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7178                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7179
7180         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7181 }
7182
7183 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7184 {
7185         bp->link_params.req_duplex = DUPLEX_FULL;
7186
7187         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7188         case PORT_FEATURE_LINK_SPEED_AUTO:
7189                 if (bp->port.supported & SUPPORTED_Autoneg) {
7190                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7191                         bp->port.advertising = bp->port.supported;
7192                 } else {
7193                         u32 ext_phy_type =
7194                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7195
7196                         if ((ext_phy_type ==
7197                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7198                             (ext_phy_type ==
7199                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7200                                 /* force 10G, no AN */
7201                                 bp->link_params.req_line_speed = SPEED_10000;
7202                                 bp->port.advertising =
7203                                                 (ADVERTISED_10000baseT_Full |
7204                                                  ADVERTISED_FIBRE);
7205                                 break;
7206                         }
7207                         BNX2X_ERR("NVRAM config error. "
7208                                   "Invalid link_config 0x%x"
7209                                   "  Autoneg not supported\n",
7210                                   bp->port.link_config);
7211                         return;
7212                 }
7213                 break;
7214
7215         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7216                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7217                         bp->link_params.req_line_speed = SPEED_10;
7218                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7219                                                 ADVERTISED_TP);
7220                 } else {
7221                         BNX2X_ERR("NVRAM config error. "
7222                                   "Invalid link_config 0x%x"
7223                                   "  speed_cap_mask 0x%x\n",
7224                                   bp->port.link_config,
7225                                   bp->link_params.speed_cap_mask);
7226                         return;
7227                 }
7228                 break;
7229
7230         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7231                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7232                         bp->link_params.req_line_speed = SPEED_10;
7233                         bp->link_params.req_duplex = DUPLEX_HALF;
7234                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7235                                                 ADVERTISED_TP);
7236                 } else {
7237                         BNX2X_ERR("NVRAM config error. "
7238                                   "Invalid link_config 0x%x"
7239                                   "  speed_cap_mask 0x%x\n",
7240                                   bp->port.link_config,
7241                                   bp->link_params.speed_cap_mask);
7242                         return;
7243                 }
7244                 break;
7245
7246         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7247                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7248                         bp->link_params.req_line_speed = SPEED_100;
7249                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7250                                                 ADVERTISED_TP);
7251                 } else {
7252                         BNX2X_ERR("NVRAM config error. "
7253                                   "Invalid link_config 0x%x"
7254                                   "  speed_cap_mask 0x%x\n",
7255                                   bp->port.link_config,
7256                                   bp->link_params.speed_cap_mask);
7257                         return;
7258                 }
7259                 break;
7260
7261         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7262                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7263                         bp->link_params.req_line_speed = SPEED_100;
7264                         bp->link_params.req_duplex = DUPLEX_HALF;
7265                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7266                                                 ADVERTISED_TP);
7267                 } else {
7268                         BNX2X_ERR("NVRAM config error. "
7269                                   "Invalid link_config 0x%x"
7270                                   "  speed_cap_mask 0x%x\n",
7271                                   bp->port.link_config,
7272                                   bp->link_params.speed_cap_mask);
7273                         return;
7274                 }
7275                 break;
7276
7277         case PORT_FEATURE_LINK_SPEED_1G:
7278                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7279                         bp->link_params.req_line_speed = SPEED_1000;
7280                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7281                                                 ADVERTISED_TP);
7282                 } else {
7283                         BNX2X_ERR("NVRAM config error. "
7284                                   "Invalid link_config 0x%x"
7285                                   "  speed_cap_mask 0x%x\n",
7286                                   bp->port.link_config,
7287                                   bp->link_params.speed_cap_mask);
7288                         return;
7289                 }
7290                 break;
7291
7292         case PORT_FEATURE_LINK_SPEED_2_5G:
7293                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7294                         bp->link_params.req_line_speed = SPEED_2500;
7295                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7296                                                 ADVERTISED_TP);
7297                 } else {
7298                         BNX2X_ERR("NVRAM config error. "
7299                                   "Invalid link_config 0x%x"
7300                                   "  speed_cap_mask 0x%x\n",
7301                                   bp->port.link_config,
7302                                   bp->link_params.speed_cap_mask);
7303                         return;
7304                 }
7305                 break;
7306
7307         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7308         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7309         case PORT_FEATURE_LINK_SPEED_10G_KR:
7310                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7311                         bp->link_params.req_line_speed = SPEED_10000;
7312                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7313                                                 ADVERTISED_FIBRE);
7314                 } else {
7315                         BNX2X_ERR("NVRAM config error. "
7316                                   "Invalid link_config 0x%x"
7317                                   "  speed_cap_mask 0x%x\n",
7318                                   bp->port.link_config,
7319                                   bp->link_params.speed_cap_mask);
7320                         return;
7321                 }
7322                 break;
7323
7324         default:
7325                 BNX2X_ERR("NVRAM config error. "
7326                           "BAD link speed link_config 0x%x\n",
7327                           bp->port.link_config);
7328                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7329                 bp->port.advertising = bp->port.supported;
7330                 break;
7331         }
7332
7333         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7334                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7335         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7336             !(bp->port.supported & SUPPORTED_Autoneg))
7337                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7338
7339         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7340                        "  advertising 0x%x\n",
7341                        bp->link_params.req_line_speed,
7342                        bp->link_params.req_duplex,
7343                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7344 }
7345
7346 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7347 {
7348         int port = BP_PORT(bp);
7349         u32 val, val2;
7350
7351         bp->link_params.bp = bp;
7352         bp->link_params.port = port;
7353
7354         bp->link_params.serdes_config =
7355                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7356         bp->link_params.lane_config =
7357                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7358         bp->link_params.ext_phy_config =
7359                 SHMEM_RD(bp,
7360                          dev_info.port_hw_config[port].external_phy_config);
7361         bp->link_params.speed_cap_mask =
7362                 SHMEM_RD(bp,
7363                          dev_info.port_hw_config[port].speed_capability_mask);
7364
7365         bp->port.link_config =
7366                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7367
7368         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7369              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7370                        "  link_config 0x%08x\n",
7371                        bp->link_params.serdes_config,
7372                        bp->link_params.lane_config,
7373                        bp->link_params.ext_phy_config,
7374                        bp->link_params.speed_cap_mask, bp->port.link_config);
7375
7376         bp->link_params.switch_cfg = (bp->port.link_config &
7377                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7378         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7379
7380         bnx2x_link_settings_requested(bp);
7381
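             /* the MAC is kept in shmem as two words: mac_upper holds
              * bytes 0-1 in its low 16 bits, mac_lower holds bytes 2-5 */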
7382         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7383         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7384         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7385         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7386         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7387         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7388         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7389         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7390         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7391         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7392 }
7393
7394 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7395 {
7396         int func = BP_FUNC(bp);
7397         u32 val, val2;
7398         int rc = 0;
7399
7400         bnx2x_get_common_hwinfo(bp);
7401
7402         bp->e1hov = 0;
7403         bp->e1hmf = 0;
7404         if (CHIP_IS_E1H(bp)) {
7405                 bp->mf_config =
7406                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7407
7408                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7409                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7410                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7411
7412                         bp->e1hov = val;
7413                         bp->e1hmf = 1;
7414                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7415                                        "(0x%04x)\n",
7416                                        func, bp->e1hov, bp->e1hov);
7417                 } else {
7418                         BNX2X_DEV_INFO("Single function mode\n");
7419                         if (BP_E1HVN(bp)) {
7420                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7421                                           "  aborting\n", func);
7422                                 rc = -EPERM;
7423                         }
7424                 }
7425         }
7426
7427         if (!BP_NOMCP(bp)) {
7428                 bnx2x_get_port_hwinfo(bp);
7429
7430                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7431                               DRV_MSG_SEQ_NUMBER_MASK);
7432                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7433         }
7434
7435         if (IS_E1HMF(bp)) {
7436                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7437                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7438                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7439                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7440                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7441                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7442                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7443                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7444                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7445                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7446                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7447                                ETH_ALEN);
7448                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7449                                ETH_ALEN);
7450                 }
7451
7452                 return rc;
7453         }
7454
7455         if (BP_NOMCP(bp)) {
7456                 /* only supposed to happen on emulation/FPGA */
7457                 BNX2X_ERR("warning: random MAC workaround active\n");
7458                 random_ether_addr(bp->dev->dev_addr);
7459                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7460         }
7461
7462         return rc;
7463 }
7464
7465 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7466 {
7467         int func = BP_FUNC(bp);
7468         int rc;
7469
7470         /* Disable interrupt handling until HW is initialized */
7471         atomic_set(&bp->intr_sem, 1);
7472
7473         mutex_init(&bp->port.phy_mutex);
7474
7475         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7476         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7477
7478         rc = bnx2x_get_hwinfo(bp);
7479
7480         /* need to reset chip if undi was active */
7481         if (!BP_NOMCP(bp))
7482                 bnx2x_undi_unload(bp);
7483
7484         if (CHIP_REV_IS_FPGA(bp))
7485                 printk(KERN_ERR PFX "FPGA detected\n");
7486
7487         if (BP_NOMCP(bp) && (func == 0))
7488                 printk(KERN_ERR PFX
7489                        "MCP disabled, must load devices in order!\n");
7490
7491         /* Set TPA flags */
7492         if (disable_tpa) {
7493                 bp->flags &= ~TPA_ENABLE_FLAG;
7494                 bp->dev->features &= ~NETIF_F_LRO;
7495         } else {
7496                 bp->flags |= TPA_ENABLE_FLAG;
7497                 bp->dev->features |= NETIF_F_LRO;
7498         }
7499
7500
7501         bp->tx_ring_size = MAX_TX_AVAIL;
7502         bp->rx_ring_size = MAX_RX_AVAIL;
7503
7504         bp->rx_csum = 1;
7505         bp->rx_offset = 0;
7506
7507         bp->tx_ticks = 50;
7508         bp->rx_ticks = 25;
7509
7510         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
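             /* a non-zero "poll" module parameter overrides the default
              * period chosen above */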
7511         bp->current_interval = (poll ? poll : bp->timer_interval);
7512
7513         init_timer(&bp->timer);
7514         bp->timer.expires = jiffies + bp->current_interval;
7515         bp->timer.data = (unsigned long) bp;
7516         bp->timer.function = bnx2x_timer;
7517
7518         return rc;
7519 }
7520
7521 /*
7522  * ethtool service functions
7523  */
7524
7525 /* All ethtool functions called with rtnl_lock */
7526
7527 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7528 {
7529         struct bnx2x *bp = netdev_priv(dev);
7530
7531         cmd->supported = bp->port.supported;
7532         cmd->advertising = bp->port.advertising;
7533
7534         if (netif_carrier_ok(dev)) {
7535                 cmd->speed = bp->link_vars.line_speed;
7536                 cmd->duplex = bp->link_vars.duplex;
7537         } else {
7538                 cmd->speed = bp->link_params.req_line_speed;
7539                 cmd->duplex = bp->link_params.req_duplex;
7540         }
7541         if (IS_E1HMF(bp)) {
7542                 u16 vn_max_rate;
7543
7544                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7545                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7546                 if (vn_max_rate < cmd->speed)
7547                         cmd->speed = vn_max_rate;
7548         }
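             /* in multi-function mode the reported speed is capped by the
              * function's configured maximum bandwidth (the MAX_BW field
              * is in units of 100 Mbps) */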
7549
7550         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7551                 u32 ext_phy_type =
7552                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7553
7554                 switch (ext_phy_type) {
7555                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7556                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7557                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7558                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7559                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7560                         cmd->port = PORT_FIBRE;
7561                         break;
7562
7563                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7564                         cmd->port = PORT_TP;
7565                         break;
7566
7567                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7568                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7569                                   bp->link_params.ext_phy_config);
7570                         break;
7571
7572                 default:
7573                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7574                            bp->link_params.ext_phy_config);
7575                         break;
7576                 }
7577         } else
7578                 cmd->port = PORT_TP;
7579
7580         cmd->phy_address = bp->port.phy_addr;
7581         cmd->transceiver = XCVR_INTERNAL;
7582
7583         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7584                 cmd->autoneg = AUTONEG_ENABLE;
7585         else
7586                 cmd->autoneg = AUTONEG_DISABLE;
7587
7588         cmd->maxtxpkt = 0;
7589         cmd->maxrxpkt = 0;
7590
7591         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7592            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7593            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7594            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7595            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7596            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7597            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7598
7599         return 0;
7600 }
7601
7602 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7603 {
7604         struct bnx2x *bp = netdev_priv(dev);
7605         u32 advertising;
7606
7607         if (IS_E1HMF(bp))
7608                 return 0;
7609
7610         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7611            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7612            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7613            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7614            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7615            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7616            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7617
7618         if (cmd->autoneg == AUTONEG_ENABLE) {
7619                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7620                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7621                         return -EINVAL;
7622                 }
7623
7624                 /* advertise the requested speed and duplex if supported */
7625                 cmd->advertising &= bp->port.supported;
7626
7627                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7628                 bp->link_params.req_duplex = DUPLEX_FULL;
7629                 bp->port.advertising |= (ADVERTISED_Autoneg |
7630                                          cmd->advertising);
7631
7632         } else { /* forced speed */
7633                 /* advertise the requested speed and duplex if supported */
7634                 switch (cmd->speed) {
7635                 case SPEED_10:
7636                         if (cmd->duplex == DUPLEX_FULL) {
7637                                 if (!(bp->port.supported &
7638                                       SUPPORTED_10baseT_Full)) {
7639                                         DP(NETIF_MSG_LINK,
7640                                            "10M full not supported\n");
7641                                         return -EINVAL;
7642                                 }
7643
7644                                 advertising = (ADVERTISED_10baseT_Full |
7645                                                ADVERTISED_TP);
7646                         } else {
7647                                 if (!(bp->port.supported &
7648                                       SUPPORTED_10baseT_Half)) {
7649                                         DP(NETIF_MSG_LINK,
7650                                            "10M half not supported\n");
7651                                         return -EINVAL;
7652                                 }
7653
7654                                 advertising = (ADVERTISED_10baseT_Half |
7655                                                ADVERTISED_TP);
7656                         }
7657                         break;
7658
7659                 case SPEED_100:
7660                         if (cmd->duplex == DUPLEX_FULL) {
7661                                 if (!(bp->port.supported &
7662                                                 SUPPORTED_100baseT_Full)) {
7663                                         DP(NETIF_MSG_LINK,
7664                                            "100M full not supported\n");
7665                                         return -EINVAL;
7666                                 }
7667
7668                                 advertising = (ADVERTISED_100baseT_Full |
7669                                                ADVERTISED_TP);
7670                         } else {
7671                                 if (!(bp->port.supported &
7672                                                 SUPPORTED_100baseT_Half)) {
7673                                         DP(NETIF_MSG_LINK,
7674                                            "100M half not supported\n");
7675                                         return -EINVAL;
7676                                 }
7677
7678                                 advertising = (ADVERTISED_100baseT_Half |
7679                                                ADVERTISED_TP);
7680                         }
7681                         break;
7682
7683                 case SPEED_1000:
7684                         if (cmd->duplex != DUPLEX_FULL) {
7685                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7686                                 return -EINVAL;
7687                         }
7688
7689                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7690                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7691                                 return -EINVAL;
7692                         }
7693
7694                         advertising = (ADVERTISED_1000baseT_Full |
7695                                        ADVERTISED_TP);
7696                         break;
7697
7698                 case SPEED_2500:
7699                         if (cmd->duplex != DUPLEX_FULL) {
7700                                 DP(NETIF_MSG_LINK,
7701                                    "2.5G half not supported\n");
7702                                 return -EINVAL;
7703                         }
7704
7705                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7706                                 DP(NETIF_MSG_LINK,
7707                                    "2.5G full not supported\n");
7708                                 return -EINVAL;
7709                         }
7710
7711                         advertising = (ADVERTISED_2500baseX_Full |
7712                                        ADVERTISED_TP);
7713                         break;
7714
7715                 case SPEED_10000:
7716                         if (cmd->duplex != DUPLEX_FULL) {
7717                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7718                                 return -EINVAL;
7719                         }
7720
7721                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7722                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7723                                 return -EINVAL;
7724                         }
7725
7726                         advertising = (ADVERTISED_10000baseT_Full |
7727                                        ADVERTISED_FIBRE);
7728                         break;
7729
7730                 default:
7731                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7732                         return -EINVAL;
7733                 }
7734
7735                 bp->link_params.req_line_speed = cmd->speed;
7736                 bp->link_params.req_duplex = cmd->duplex;
7737                 bp->port.advertising = advertising;
7738         }
7739
7740         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7741            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7742            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7743            bp->port.advertising);
7744
7745         if (netif_running(dev)) {
7746                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7747                 bnx2x_link_set(bp);
7748         }
7749
7750         return 0;
7751 }
7752
7753 #define PHY_FW_VER_LEN                  10
7754
7755 static void bnx2x_get_drvinfo(struct net_device *dev,
7756                               struct ethtool_drvinfo *info)
7757 {
7758         struct bnx2x *bp = netdev_priv(dev);
7759         u8 phy_fw_ver[PHY_FW_VER_LEN];
7760
7761         strcpy(info->driver, DRV_MODULE_NAME);
7762         strcpy(info->version, DRV_MODULE_VERSION);
7763
7764         phy_fw_ver[0] = '\0';
7765         if (bp->port.pmf) {
7766                 bnx2x_acquire_phy_lock(bp);
7767                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7768                                              (bp->state != BNX2X_STATE_CLOSED),
7769                                              phy_fw_ver, PHY_FW_VER_LEN);
7770                 bnx2x_release_phy_lock(bp);
7771         }
7772
7773         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7774                  (bp->common.bc_ver & 0xff0000) >> 16,
7775                  (bp->common.bc_ver & 0xff00) >> 8,
7776                  (bp->common.bc_ver & 0xff),
7777                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7778         strcpy(info->bus_info, pci_name(bp->pdev));
7779         info->n_stats = BNX2X_NUM_STATS;
7780         info->testinfo_len = BNX2X_NUM_TESTS;
7781         info->eedump_len = bp->common.flash_size;
7782         info->regdump_len = 0;
7783 }
7784
7785 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7786 {
7787         struct bnx2x *bp = netdev_priv(dev);
7788
7789         if (bp->flags & NO_WOL_FLAG) {
7790                 wol->supported = 0;
7791                 wol->wolopts = 0;
7792         } else {
7793                 wol->supported = WAKE_MAGIC;
7794                 if (bp->wol)
7795                         wol->wolopts = WAKE_MAGIC;
7796                 else
7797                         wol->wolopts = 0;
7798         }
7799         memset(&wol->sopass, 0, sizeof(wol->sopass));
7800 }
7801
7802 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7803 {
7804         struct bnx2x *bp = netdev_priv(dev);
7805
7806         if (wol->wolopts & ~WAKE_MAGIC)
7807                 return -EINVAL;
7808
7809         if (wol->wolopts & WAKE_MAGIC) {
7810                 if (bp->flags & NO_WOL_FLAG)
7811                         return -EINVAL;
7812
7813                 bp->wol = 1;
7814         } else
7815                 bp->wol = 0;
7816
7817         return 0;
7818 }
7819
7820 static u32 bnx2x_get_msglevel(struct net_device *dev)
7821 {
7822         struct bnx2x *bp = netdev_priv(dev);
7823
7824         return bp->msglevel;
7825 }
7826
7827 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7828 {
7829         struct bnx2x *bp = netdev_priv(dev);
7830
7831         if (capable(CAP_NET_ADMIN))
7832                 bp->msglevel = level;
7833 }
7834
7835 static int bnx2x_nway_reset(struct net_device *dev)
7836 {
7837         struct bnx2x *bp = netdev_priv(dev);
7838
7839         if (!bp->port.pmf)
7840                 return 0;
7841
7842         if (netif_running(dev)) {
7843                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7844                 bnx2x_link_set(bp);
7845         }
7846
7847         return 0;
7848 }
7849
7850 static int bnx2x_get_eeprom_len(struct net_device *dev)
7851 {
7852         struct bnx2x *bp = netdev_priv(dev);
7853
7854         return bp->common.flash_size;
7855 }
7856
7857 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7858 {
7859         int port = BP_PORT(bp);
7860         int count, i;
7861         u32 val = 0;
7862
7863         /* adjust timeout for emulation/FPGA */
7864         count = NVRAM_TIMEOUT_COUNT;
7865         if (CHIP_REV_IS_SLOW(bp))
7866                 count *= 100;
7867
7868         /* request access to nvram interface */
7869         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7870                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7871
7872         for (i = 0; i < count*10; i++) {
7873                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7874                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7875                         break;
7876
7877                 udelay(5);
7878         }
7879
7880         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7881                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7882                 return -EBUSY;
7883         }
7884
7885         return 0;
7886 }
7887
7888 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7889 {
7890         int port = BP_PORT(bp);
7891         int count, i;
7892         u32 val = 0;
7893
7894         /* adjust timeout for emulation/FPGA */
7895         count = NVRAM_TIMEOUT_COUNT;
7896         if (CHIP_REV_IS_SLOW(bp))
7897                 count *= 100;
7898
7899         /* relinquish nvram interface */
7900         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7901                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7902
7903         for (i = 0; i < count*10; i++) {
7904                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7905                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7906                         break;
7907
7908                 udelay(5);
7909         }
7910
7911         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7912                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7913                 return -EBUSY;
7914         }
7915
7916         return 0;
7917 }
7918
7919 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7920 {
7921         u32 val;
7922
7923         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7924
7925         /* enable both bits, even on read */
7926         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7927                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7928                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7929 }
7930
7931 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7932 {
7933         u32 val;
7934
7935         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7936
7937         /* disable both bits, even after read */
7938         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7939                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7940                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7941 }
7942
7943 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7944                                   u32 cmd_flags)
7945 {
7946         int count, i, rc;
7947         u32 val;
7948
7949         /* build the command word */
7950         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7951
7952         /* need to clear DONE bit separately */
7953         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7954
7955         /* address of the NVRAM to read from */
7956         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7957                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7958
7959         /* issue a read command */
7960         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7961
7962         /* adjust timeout for emulation/FPGA */
7963         count = NVRAM_TIMEOUT_COUNT;
7964         if (CHIP_REV_IS_SLOW(bp))
7965                 count *= 100;
7966
7967         /* wait for completion */
7968         *ret_val = 0;
7969         rc = -EBUSY;
7970         for (i = 0; i < count; i++) {
7971                 udelay(5);
7972                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7973
7974                 if (val & MCPR_NVM_COMMAND_DONE) {
7975                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7976                         /* we read NVRAM data in CPU order,
7977                          * but ethtool sees it as an array of bytes;
7978                          * converting to big-endian does the work */
7979                         val = cpu_to_be32(val);
7980                         *ret_val = val;
7981                         rc = 0;
7982                         break;
7983                 }
7984         }
7985
7986         return rc;
7987 }
7988
7989 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7990                             int buf_size)
7991 {
7992         int rc;
7993         u32 cmd_flags;
7994         u32 val;
7995
7996         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7997                 DP(BNX2X_MSG_NVM,
7998                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7999                    offset, buf_size);
8000                 return -EINVAL;
8001         }
8002
8003         if (offset + buf_size > bp->common.flash_size) {
8004                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8005                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8006                    offset, buf_size, bp->common.flash_size);
8007                 return -EINVAL;
8008         }
8009
8010         /* request access to nvram interface */
8011         rc = bnx2x_acquire_nvram_lock(bp);
8012         if (rc)
8013                 return rc;
8014
8015         /* enable access to nvram interface */
8016         bnx2x_enable_nvram_access(bp);
8017
8018         /* read the first word(s) */
8019         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8020         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8021                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8022                 memcpy(ret_buf, &val, 4);
8023
8024                 /* advance to the next dword */
8025                 offset += sizeof(u32);
8026                 ret_buf += sizeof(u32);
8027                 buf_size -= sizeof(u32);
8028                 cmd_flags = 0;
8029         }
8030
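             /* the final dword is issued with the LAST flag so the NVRAM
              * interface knows the command sequence is complete */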
8031         if (rc == 0) {
8032                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8033                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8034                 memcpy(ret_buf, &val, 4);
8035         }
8036
8037         /* disable access to nvram interface */
8038         bnx2x_disable_nvram_access(bp);
8039         bnx2x_release_nvram_lock(bp);
8040
8041         return rc;
8042 }
8043
8044 static int bnx2x_get_eeprom(struct net_device *dev,
8045                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8046 {
8047         struct bnx2x *bp = netdev_priv(dev);
8048         int rc;
8049
8050         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8051            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8052            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8053            eeprom->len, eeprom->len);
8054
8055         /* parameters already validated in ethtool_get_eeprom */
8056
8057         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8058
8059         return rc;
8060 }
8061
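/* Write a single dword to NVRAM at offset and poll the DONE bit for
 * completion.  As with reads, the timeout is stretched for
 * emulation/FPGA platforms.
 */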
8062 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8063                                    u32 cmd_flags)
8064 {
8065         int count, i, rc;
8066
8067         /* build the command word */
8068         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8069
8070         /* need to clear DONE bit separately */
8071         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8072
8073         /* write the data */
8074         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8075
8076         /* address of the NVRAM to write to */
8077         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8078                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8079
8080         /* issue the write command */
8081         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8082
8083         /* adjust timeout for emulation/FPGA */
8084         count = NVRAM_TIMEOUT_COUNT;
8085         if (CHIP_REV_IS_SLOW(bp))
8086                 count *= 100;
8087
8088         /* wait for completion */
8089         rc = -EBUSY;
8090         for (i = 0; i < count; i++) {
8091                 udelay(5);
8092                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8093                 if (val & MCPR_NVM_COMMAND_DONE) {
8094                         rc = 0;
8095                         break;
8096                 }
8097         }
8098
8099         return rc;
8100 }
8101
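/* Convert a byte offset within a dword into a bit shift,
 * e.g. BYTE_OFFSET(2) == 16.
 */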
8102 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8103
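/* Write a single byte via a read-modify-write of the dword that
 * contains it; this services ethtool requests with len == 1.
 */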
8104 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8105                               int buf_size)
8106 {
8107         int rc;
8108         u32 cmd_flags;
8109         u32 align_offset;
8110         u32 val;
8111
8112         if (offset + buf_size > bp->common.flash_size) {
8113                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8114                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8115                    offset, buf_size, bp->common.flash_size);
8116                 return -EINVAL;
8117         }
8118
8119         /* request access to nvram interface */
8120         rc = bnx2x_acquire_nvram_lock(bp);
8121         if (rc)
8122                 return rc;
8123
8124         /* enable access to nvram interface */
8125         bnx2x_enable_nvram_access(bp);
8126
8127         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8128         align_offset = (offset & ~0x03);
8129         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8130
8131         if (rc == 0) {
8132                 val &= ~(0xff << BYTE_OFFSET(offset));
8133                 val |= (*data_buf << BYTE_OFFSET(offset));
8134
8135                 /* NVRAM data is returned as an array of bytes;
8136                  * convert it back to CPU order */
8137                 val = be32_to_cpu(val);
8138
8139                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8140                                              cmd_flags);
8141         }
8142
8143         /* disable access to nvram interface */
8144         bnx2x_disable_nvram_access(bp);
8145         bnx2x_release_nvram_lock(bp);
8146
8147         return rc;
8148 }
8149
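/* Write buf_size bytes to NVRAM at offset (both dword aligned; the
 * single-byte case is handled by bnx2x_nvram_write1 above).  The
 * FIRST/LAST command flags are re-asserted at NVRAM page boundaries so
 * that each page is programmed as its own burst.
 */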
8150 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8151                              int buf_size)
8152 {
8153         int rc;
8154         u32 cmd_flags;
8155         u32 val;
8156         u32 written_so_far;
8157
8158         if (buf_size == 1)      /* ethtool */
8159                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8160
8161         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8162                 DP(BNX2X_MSG_NVM,
8163                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8164                    offset, buf_size);
8165                 return -EINVAL;
8166         }
8167
8168         if (offset + buf_size > bp->common.flash_size) {
8169                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8170                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8171                    offset, buf_size, bp->common.flash_size);
8172                 return -EINVAL;
8173         }
8174
8175         /* request access to nvram interface */
8176         rc = bnx2x_acquire_nvram_lock(bp);
8177         if (rc)
8178                 return rc;
8179
8180         /* enable access to nvram interface */
8181         bnx2x_enable_nvram_access(bp);
8182
8183         written_so_far = 0;
8184         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8185         while ((written_so_far < buf_size) && (rc == 0)) {
8186                 if (written_so_far == (buf_size - sizeof(u32)))
8187                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8188                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8189                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8190                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8191                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8192
8193                 memcpy(&val, data_buf, 4);
8194
8195                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8196
8197                 /* advance to the next dword */
8198                 offset += sizeof(u32);
8199                 data_buf += sizeof(u32);
8200                 written_so_far += sizeof(u32);
8201                 cmd_flags = 0;
8202         }
8203
8204         /* disable access to nvram interface */
8205         bnx2x_disable_nvram_access(bp);
8206         bnx2x_release_nvram_lock(bp);
8207
8208         return rc;
8209 }
8210
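/* ethtool EEPROM write entry point.  A magic of 0x00504859 (ASCII
 * "PHY") requests an external PHY firmware upgrade instead of an NVRAM
 * write; only the PMF is allowed to touch the PHY.
 */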
8211 static int bnx2x_set_eeprom(struct net_device *dev,
8212                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8213 {
8214         struct bnx2x *bp = netdev_priv(dev);
8215         int rc;
8216
8217         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8218            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8219            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8220            eeprom->len, eeprom->len);
8221
8222         /* parameters already validated in ethtool_set_eeprom */
8223
8224         /* If the magic number is PHY (0x00504859, ASCII "PHY") upgrade the PHY FW */
8225         if (eeprom->magic == 0x00504859)
8226                 if (bp->port.pmf) {
8227
8228                         bnx2x_acquire_phy_lock(bp);
8229                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8230                                              bp->link_params.ext_phy_config,
8231                                              (bp->state != BNX2X_STATE_CLOSED),
8232                                              eebuf, eeprom->len);
8233                         if ((bp->state == BNX2X_STATE_OPEN) ||
8234                             (bp->state == BNX2X_STATE_DISABLED)) {
8235                                 rc |= bnx2x_link_reset(&bp->link_params,
8236                                                        &bp->link_vars);
8237                                 rc |= bnx2x_phy_init(&bp->link_params,
8238                                                      &bp->link_vars);
8239                         }
8240                         bnx2x_release_phy_lock(bp);
8241
8242                 } else /* Only the PMF can access the PHY */
8243                         return -EINVAL;
8244         else
8245                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8246
8247         return rc;
8248 }
8249
8250 static int bnx2x_get_coalesce(struct net_device *dev,
8251                               struct ethtool_coalesce *coal)
8252 {
8253         struct bnx2x *bp = netdev_priv(dev);
8254
8255         memset(coal, 0, sizeof(struct ethtool_coalesce));
8256
8257         coal->rx_coalesce_usecs = bp->rx_ticks;
8258         coal->tx_coalesce_usecs = bp->tx_ticks;
8259
8260         return 0;
8261 }
8262
8263 static int bnx2x_set_coalesce(struct net_device *dev,
8264                               struct ethtool_coalesce *coal)
8265 {
8266         struct bnx2x *bp = netdev_priv(dev);
8267
8268         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8269         if (bp->rx_ticks > 3000)
8270                 bp->rx_ticks = 3000;
8271
8272         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8273         if (bp->tx_ticks > 3000)
8274                 bp->tx_ticks = 3000;
8275
8276         if (netif_running(dev))
8277                 bnx2x_update_coalesce(bp);
8278
8279         return 0;
8280 }
8281
8282 static void bnx2x_get_ringparam(struct net_device *dev,
8283                                 struct ethtool_ringparam *ering)
8284 {
8285         struct bnx2x *bp = netdev_priv(dev);
8286
8287         ering->rx_max_pending = MAX_RX_AVAIL;
8288         ering->rx_mini_max_pending = 0;
8289         ering->rx_jumbo_max_pending = 0;
8290
8291         ering->rx_pending = bp->rx_ring_size;
8292         ering->rx_mini_pending = 0;
8293         ering->rx_jumbo_pending = 0;
8294
8295         ering->tx_max_pending = MAX_TX_AVAIL;
8296         ering->tx_pending = bp->tx_ring_size;
8297 }
8298
8299 static int bnx2x_set_ringparam(struct net_device *dev,
8300                                struct ethtool_ringparam *ering)
8301 {
8302         struct bnx2x *bp = netdev_priv(dev);
8303         int rc = 0;
8304
8305         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8306             (ering->tx_pending > MAX_TX_AVAIL) ||
8307             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8308                 return -EINVAL;
8309
8310         bp->rx_ring_size = ering->rx_pending;
8311         bp->tx_ring_size = ering->tx_pending;
8312
8313         if (netif_running(dev)) {
8314                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8315                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8316         }
8317
8318         return rc;
8319 }
8320
8321 static void bnx2x_get_pauseparam(struct net_device *dev,
8322                                  struct ethtool_pauseparam *epause)
8323 {
8324         struct bnx2x *bp = netdev_priv(dev);
8325
8326         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8327                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8328
8329         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8330                             FLOW_CTRL_RX);
8331         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8332                             FLOW_CTRL_TX);
8333
8334         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8335            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8336            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8337 }
8338
8339 static int bnx2x_set_pauseparam(struct net_device *dev,
8340                                 struct ethtool_pauseparam *epause)
8341 {
8342         struct bnx2x *bp = netdev_priv(dev);
8343
8344         if (IS_E1HMF(bp))
8345                 return 0;
8346
8347         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8348            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8349            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8350
8351         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8352
8353         if (epause->rx_pause)
8354                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8355
8356         if (epause->tx_pause)
8357                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8358
8359         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8360                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8361
8362         if (epause->autoneg) {
8363                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8364                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8365                         return -EINVAL;
8366                 }
8367
8368                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8369                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8370         }
8371
8372         DP(NETIF_MSG_LINK,
8373            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8374
8375         if (netif_running(dev)) {
8376                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8377                 bnx2x_link_set(bp);
8378         }
8379
8380         return 0;
8381 }
8382
8383 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8384 {
8385         struct bnx2x *bp = netdev_priv(dev);
8386         int changed = 0;
8387         int rc = 0;
8388
8389         /* TPA requires Rx CSUM offloading */
8390         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8391                 if (!(dev->features & NETIF_F_LRO)) {
8392                         dev->features |= NETIF_F_LRO;
8393                         bp->flags |= TPA_ENABLE_FLAG;
8394                         changed = 1;
8395                 }
8396
8397         } else if (dev->features & NETIF_F_LRO) {
8398                 dev->features &= ~NETIF_F_LRO;
8399                 bp->flags &= ~TPA_ENABLE_FLAG;
8400                 changed = 1;
8401         }
8402
8403         if (changed && netif_running(dev)) {
8404                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8405                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8406         }
8407
8408         return rc;
8409 }
8410
8411 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8412 {
8413         struct bnx2x *bp = netdev_priv(dev);
8414
8415         return bp->rx_csum;
8416 }
8417
8418 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8419 {
8420         struct bnx2x *bp = netdev_priv(dev);
8421         int rc = 0;
8422
8423         bp->rx_csum = data;
8424
8425         /* Disable TPA when Rx CSUM is disabled; otherwise all
8426            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8427         if (!data) {
8428                 u32 flags = ethtool_op_get_flags(dev);
8429
8430                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8431         }
8432
8433         return rc;
8434 }
8435
8436 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8437 {
8438         if (data) {
8439                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8440                 dev->features |= NETIF_F_TSO6;
8441         } else {
8442                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8443                 dev->features &= ~NETIF_F_TSO6;
8444         }
8445
8446         return 0;
8447 }
8448
8449 static const struct {
8450         char string[ETH_GSTRING_LEN];
8451 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8452         { "register_test (offline)" },
8453         { "memory_test (offline)" },
8454         { "loopback_test (offline)" },
8455         { "nvram_test (online)" },
8456         { "interrupt_test (online)" },
8457         { "link_test (online)" },
8458         { "idle check (online)" },
8459         { "MC errors (online)" }
8460 };
8461
8462 static int bnx2x_self_test_count(struct net_device *dev)
8463 {
8464         return BNX2X_NUM_TESTS;
8465 }
8466
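/* Register self-test: for each reg_tbl entry, write 0x00000000 and then
 * 0xffffffff, read the value back through the entry's mask and restore
 * the original contents.  offset1 is the per-port stride added to
 * offset0 for port 1.
 */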
8467 static int bnx2x_test_registers(struct bnx2x *bp)
8468 {
8469         int idx, i, rc = -ENODEV;
8470         u32 wr_val = 0;
8471         int port = BP_PORT(bp);
8472         static const struct {
8473                 u32  offset0;
8474                 u32  offset1;
8475                 u32  mask;
8476         } reg_tbl[] = {
8477 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8478                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8479                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8480                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8481                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8482                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8483                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8484                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8485                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8486                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8487 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8488                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8489                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8490                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8491                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8492                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8493                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8494                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8495                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8496                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8497 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8498                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8499                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8500                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8501                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8502                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8503                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8504                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8505                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8506                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8507 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8508                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8509                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8510                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8511                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8512                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8513                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8514                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8515
8516                 { 0xffffffff, 0, 0x00000000 }
8517         };
8518
8519         if (!netif_running(bp->dev))
8520                 return rc;
8521
8522         /* Run the test twice:
8523            first writing 0x00000000, then writing 0xffffffff */
8524         for (idx = 0; idx < 2; idx++) {
8525
8526                 switch (idx) {
8527                 case 0:
8528                         wr_val = 0;
8529                         break;
8530                 case 1:
8531                         wr_val = 0xffffffff;
8532                         break;
8533                 }
8534
8535                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8536                         u32 offset, mask, save_val, val;
8537
8538                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8539                         mask = reg_tbl[i].mask;
8540
8541                         save_val = REG_RD(bp, offset);
8542
8543                         REG_WR(bp, offset, wr_val);
8544                         val = REG_RD(bp, offset);
8545
8546                         /* Restore the original register's value */
8547                         REG_WR(bp, offset, save_val);
8548
8549                         /* verify the read-back value matches the expected one */
8550                         if ((val & mask) != (wr_val & mask))
8551                                 goto test_reg_exit;
8552                 }
8553         }
8554
8555         rc = 0;
8556
8557 test_reg_exit:
8558         return rc;
8559 }
8560
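/* Memory self-test: read back every word of each block in mem_tbl, then
 * check the parity status registers against the per-chip masks of bits
 * that may legitimately be set.
 */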
8561 static int bnx2x_test_memory(struct bnx2x *bp)
8562 {
8563         int i, j, rc = -ENODEV;
8564         u32 val;
8565         static const struct {
8566                 u32 offset;
8567                 int size;
8568         } mem_tbl[] = {
8569                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8570                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8571                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8572                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8573                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8574                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8575                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8576
8577                 { 0xffffffff, 0 }
8578         };
8579         static const struct {
8580                 char *name;
8581                 u32 offset;
8582                 u32 e1_mask;
8583                 u32 e1h_mask;
8584         } prty_tbl[] = {
8585                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8586                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8587                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8588                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8589                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8590                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8591
8592                 { NULL, 0xffffffff, 0, 0 }
8593         };
8594
8595         if (!netif_running(bp->dev))
8596                 return rc;
8597
8598         /* Go through all the memories */
8599         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8600                 for (j = 0; j < mem_tbl[i].size; j++)
8601                         REG_RD(bp, mem_tbl[i].offset + j*4);
8602
8603         /* Check the parity status */
8604         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8605                 val = REG_RD(bp, prty_tbl[i].offset);
8606                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8607                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8608                         DP(NETIF_MSG_HW,
8609                            "%s is 0x%x\n", prty_tbl[i].name, val);
8610                         goto test_mem_exit;
8611                 }
8612         }
8613
8614         rc = 0;
8615
8616 test_mem_exit:
8617         return rc;
8618 }
8619
8620 static void bnx2x_netif_start(struct bnx2x *bp)
8621 {
8622         int i;
8623
8624         if (atomic_dec_and_test(&bp->intr_sem)) {
8625                 if (netif_running(bp->dev)) {
8626                         bnx2x_int_enable(bp);
8627                         for_each_queue(bp, i)
8628                                 napi_enable(&bnx2x_fp(bp, i, napi));
8629                         if (bp->state == BNX2X_STATE_OPEN)
8630                                 netif_wake_queue(bp->dev);
8631                 }
8632         }
8633 }
8634
8635 static void bnx2x_netif_stop(struct bnx2x *bp)
8636 {
8637         int i;
8638
8639         if (netif_running(bp->dev)) {
8640                 netif_tx_disable(bp->dev);
8641                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8642                 for_each_queue(bp, i)
8643                         napi_disable(&bnx2x_fp(bp, i, napi));
8644         }
8645         bnx2x_int_disable_sync(bp);
8646 }
8647
8648 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8649 {
8650         int cnt = 1000;
8651
8652         if (link_up)
8653                 while (bnx2x_link_test(bp) && cnt--)
8654                         msleep(10);
8655 }
8656
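/* Run one loopback iteration: bring the link up in the requested
 * loopback mode, send a single self-addressed test packet, then poll the
 * Tx and Rx consumer indices and verify that the received payload
 * matches the transmitted pattern.
 */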
8657 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8658 {
8659         unsigned int pkt_size, num_pkts, i;
8660         struct sk_buff *skb;
8661         unsigned char *packet;
8662         struct bnx2x_fastpath *fp = &bp->fp[0];
8663         u16 tx_start_idx, tx_idx;
8664         u16 rx_start_idx, rx_idx;
8665         u16 pkt_prod;
8666         struct sw_tx_bd *tx_buf;
8667         struct eth_tx_bd *tx_bd;
8668         dma_addr_t mapping;
8669         union eth_rx_cqe *cqe;
8670         u8 cqe_fp_flags;
8671         struct sw_rx_bd *rx_buf;
8672         u16 len;
8673         int rc = -ENODEV;
8674
8675         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8676                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8677                 bnx2x_acquire_phy_lock(bp);
8678                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8679                 bnx2x_release_phy_lock(bp);
8680
8681         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8682                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8683                 bnx2x_acquire_phy_lock(bp);
8684                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8685                 bnx2x_release_phy_lock(bp);
8686                 /* wait until link state is restored */
8687                 bnx2x_wait_for_link(bp, link_up);
8688
8689         } else
8690                 return -EINVAL;
8691
8692         pkt_size = 1514;
8693         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8694         if (!skb) {
8695                 rc = -ENOMEM;
8696                 goto test_loopback_exit;
8697         }
8698         packet = skb_put(skb, pkt_size);
8699         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8700         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8701         for (i = ETH_HLEN; i < pkt_size; i++)
8702                 packet[i] = (unsigned char) (i & 0xff);
8703
8704         num_pkts = 0;
8705         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8706         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8707
8708         pkt_prod = fp->tx_pkt_prod++;
8709         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8710         tx_buf->first_bd = fp->tx_bd_prod;
8711         tx_buf->skb = skb;
8712
8713         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8714         mapping = pci_map_single(bp->pdev, skb->data,
8715                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8716         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8717         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8718         tx_bd->nbd = cpu_to_le16(1);
8719         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8720         tx_bd->vlan = cpu_to_le16(pkt_prod);
8721         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8722                                        ETH_TX_BD_FLAGS_END_BD);
8723         tx_bd->general_data = ((UNICAST_ADDRESS <<
8724                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8725
8726         fp->hw_tx_prods->bds_prod =
8727                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8728         mb(); /* FW restriction: must not reorder writing nbd and packets */
8729         fp->hw_tx_prods->packets_prod =
8730                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8731         DOORBELL(bp, FP_IDX(fp), 0);
8732
8733         mmiowb();
8734
8735         num_pkts++;
8736         fp->tx_bd_prod++;
8737         bp->dev->trans_start = jiffies;
8738
8739         udelay(100);
8740
8741         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8742         if (tx_idx != tx_start_idx + num_pkts)
8743                 goto test_loopback_exit;
8744
8745         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8746         if (rx_idx != rx_start_idx + num_pkts)
8747                 goto test_loopback_exit;
8748
8749         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8750         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8751         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8752                 goto test_loopback_rx_exit;
8753
8754         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8755         if (len != pkt_size)
8756                 goto test_loopback_rx_exit;
8757
8758         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8759         skb = rx_buf->skb;
8760         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8761         for (i = ETH_HLEN; i < pkt_size; i++)
8762                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8763                         goto test_loopback_rx_exit;
8764
8765         rc = 0;
8766
8767 test_loopback_rx_exit:
8768         bp->dev->last_rx = jiffies;
8769
8770         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8771         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8772         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8773         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8774
8775         /* Update producers */
8776         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8777                              fp->rx_sge_prod);
8778         mmiowb(); /* keep prod updates ordered */
8779
8780 test_loopback_exit:
8781         bp->link_params.loopback_mode = LOOPBACK_NONE;
8782
8783         return rc;
8784 }
8785
8786 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8787 {
8788         int rc = 0;
8789
8790         if (!netif_running(bp->dev))
8791                 return BNX2X_LOOPBACK_FAILED;
8792
8793         bnx2x_netif_stop(bp);
8794
8795         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8796                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8797                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8798         }
8799
8800         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8801                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8802                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8803         }
8804
8805         bnx2x_netif_start(bp);
8806
8807         return rc;
8808 }
8809
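/* Residue left by ether_crc_le() over a region whose last four bytes
 * hold the complemented CRC32 of the preceding data; each NVRAM region
 * checked below is expected to be stored that way.
 */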
8810 #define CRC32_RESIDUAL                  0xdebb20e3
8811
8812 static int bnx2x_test_nvram(struct bnx2x *bp)
8813 {
8814         static const struct {
8815                 int offset;
8816                 int size;
8817         } nvram_tbl[] = {
8818                 {     0,  0x14 }, /* bootstrap */
8819                 {  0x14,  0xec }, /* dir */
8820                 { 0x100, 0x350 }, /* manuf_info */
8821                 { 0x450,  0xf0 }, /* feature_info */
8822                 { 0x640,  0x64 }, /* upgrade_key_info */
8823                 { 0x6a4,  0x64 },
8824                 { 0x708,  0x70 }, /* manuf_key_info */
8825                 { 0x778,  0x70 },
8826                 {     0,     0 }
8827         };
8828         u32 buf[0x350 / 4];
8829         u8 *data = (u8 *)buf;
8830         int i, rc;
8831         u32 magic, csum;
8832
8833         rc = bnx2x_nvram_read(bp, 0, data, 4);
8834         if (rc) {
8835                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8836                 goto test_nvram_exit;
8837         }
8838
8839         magic = be32_to_cpu(buf[0]);
8840         if (magic != 0x669955aa) {
8841                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8842                 rc = -ENODEV;
8843                 goto test_nvram_exit;
8844         }
8845
8846         for (i = 0; nvram_tbl[i].size; i++) {
8847
8848                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8849                                       nvram_tbl[i].size);
8850                 if (rc) {
8851                         DP(NETIF_MSG_PROBE,
8852                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8853                         goto test_nvram_exit;
8854                 }
8855
8856                 csum = ether_crc_le(nvram_tbl[i].size, data);
8857                 if (csum != CRC32_RESIDUAL) {
8858                         DP(NETIF_MSG_PROBE,
8859                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8860                         rc = -ENODEV;
8861                         goto test_nvram_exit;
8862                 }
8863         }
8864
8865 test_nvram_exit:
8866         return rc;
8867 }
8868
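/* Interrupt self-test: post a benign set-MAC ramrod on the slow path
 * and wait up to ~100ms for its completion, which arrives through the
 * interrupt path.
 */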
8869 static int bnx2x_test_intr(struct bnx2x *bp)
8870 {
8871         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8872         int i, rc;
8873
8874         if (!netif_running(bp->dev))
8875                 return -ENODEV;
8876
8877         config->hdr.length_6b = 0;
8878         config->hdr.offset = 0;
8879         config->hdr.client_id = BP_CL_ID(bp);
8880         config->hdr.reserved1 = 0;
8881
8882         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8883                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8884                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8885         if (rc == 0) {
8886                 bp->set_mac_pending++;
8887                 for (i = 0; i < 10; i++) {
8888                         if (!bp->set_mac_pending)
8889                                 break;
8890                         msleep_interruptible(10);
8891                 }
8892                 if (i == 10)
8893                         rc = -ENODEV;
8894         }
8895
8896         return rc;
8897 }
8898
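/* ethtool self-test dispatcher.  Result slots: buf[0] registers,
 * buf[1] memory, buf[2] loopback, buf[3] NVRAM, buf[4] interrupt,
 * buf[5] link and buf[7] MC assert count; buf[6] ("idle check") is
 * currently never filled in.
 */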
8899 static void bnx2x_self_test(struct net_device *dev,
8900                             struct ethtool_test *etest, u64 *buf)
8901 {
8902         struct bnx2x *bp = netdev_priv(dev);
8903
8904         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8905
8906         if (!netif_running(dev))
8907                 return;
8908
8909         /* offline tests are not supported in MF mode */
8910         if (IS_E1HMF(bp))
8911                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8912
8913         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8914                 u8 link_up;
8915
8916                 link_up = bp->link_vars.link_up;
8917                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8918                 bnx2x_nic_load(bp, LOAD_DIAG);
8919                 /* wait until link state is restored */
8920                 bnx2x_wait_for_link(bp, link_up);
8921
8922                 if (bnx2x_test_registers(bp) != 0) {
8923                         buf[0] = 1;
8924                         etest->flags |= ETH_TEST_FL_FAILED;
8925                 }
8926                 if (bnx2x_test_memory(bp) != 0) {
8927                         buf[1] = 1;
8928                         etest->flags |= ETH_TEST_FL_FAILED;
8929                 }
8930                 buf[2] = bnx2x_test_loopback(bp, link_up);
8931                 if (buf[2] != 0)
8932                         etest->flags |= ETH_TEST_FL_FAILED;
8933
8934                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8935                 bnx2x_nic_load(bp, LOAD_NORMAL);
8936                 /* wait until link state is restored */
8937                 bnx2x_wait_for_link(bp, link_up);
8938         }
8939         if (bnx2x_test_nvram(bp) != 0) {
8940                 buf[3] = 1;
8941                 etest->flags |= ETH_TEST_FL_FAILED;
8942         }
8943         if (bnx2x_test_intr(bp) != 0) {
8944                 buf[4] = 1;
8945                 etest->flags |= ETH_TEST_FL_FAILED;
8946         }
8947         if (bp->port.pmf)
8948                 if (bnx2x_link_test(bp) != 0) {
8949                         buf[5] = 1;
8950                         etest->flags |= ETH_TEST_FL_FAILED;
8951                 }
8952         buf[7] = bnx2x_mc_assert(bp);
8953         if (buf[7] != 0)
8954                 etest->flags |= ETH_TEST_FL_FAILED;
8955
8956 #ifdef BNX2X_EXTRA_DEBUG
8957         bnx2x_panic_dump(bp);
8958 #endif
8959 }
8960
8961 static const struct {
8962         long offset;
8963         int size;
8964         u32 flags;
8965 #define STATS_FLAGS_PORT                1
8966 #define STATS_FLAGS_FUNC                2
8967         u8 string[ETH_GSTRING_LEN];
8968 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8969 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8970                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8971         { STATS_OFFSET32(error_bytes_received_hi),
8972                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8973         { STATS_OFFSET32(total_bytes_transmitted_hi),
8974                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8975         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8976                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8977         { STATS_OFFSET32(total_unicast_packets_received_hi),
8978                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8979         { STATS_OFFSET32(total_multicast_packets_received_hi),
8980                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8981         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8982                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8983         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8984                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8985         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8986                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8987 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8988                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8989         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8990                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8991         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8992                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8993         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8994                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8995         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8996                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8997         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8998                                 8, STATS_FLAGS_PORT, "tx_deferred" },
8999         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9000                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9001         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9002                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9003         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9004                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9005         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9006                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9007 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9008                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9009         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9010                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9011         { STATS_OFFSET32(jabber_packets_received),
9012                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9013         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9014                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9015         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9016                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9017         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9018                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9019         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9020                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9021         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9022                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9023         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9024                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9025         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9026                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9027 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9028                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9029         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9030                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9031         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9032                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9033         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9034                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9035         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9036                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9037         { STATS_OFFSET32(mac_filter_discard),
9038                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9039         { STATS_OFFSET32(no_buff_discard),
9040                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9041         { STATS_OFFSET32(xxoverflow_discard),
9042                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9043         { STATS_OFFSET32(brb_drop_hi),
9044                                 8, STATS_FLAGS_PORT, "brb_discard" },
9045         { STATS_OFFSET32(brb_truncate_hi),
9046                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9047 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9048                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9049         { STATS_OFFSET32(rx_skb_alloc_failed),
9050                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9051 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9052                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9053 };
9054
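/* Port statistics are not available per function in E1H multi-function
 * mode, so such counters are skipped there.
 */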
9055 #define IS_NOT_E1HMF_STAT(bp, i) \
9056                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9057
9058 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9059 {
9060         struct bnx2x *bp = netdev_priv(dev);
9061         int i, j;
9062
9063         switch (stringset) {
9064         case ETH_SS_STATS:
9065                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9066                         if (IS_NOT_E1HMF_STAT(bp, i))
9067                                 continue;
9068                         strcpy(buf + j*ETH_GSTRING_LEN,
9069                                bnx2x_stats_arr[i].string);
9070                         j++;
9071                 }
9072                 break;
9073
9074         case ETH_SS_TEST:
9075                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9076                 break;
9077         }
9078 }
9079
9080 static int bnx2x_get_stats_count(struct net_device *dev)
9081 {
9082         struct bnx2x *bp = netdev_priv(dev);
9083         int i, num_stats = 0;
9084
9085         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9086                 if (IS_NOT_E1HMF_STAT(bp, i))
9087                         continue;
9088                 num_stats++;
9089         }
9090         return num_stats;
9091 }
9092
9093 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9094                                     struct ethtool_stats *stats, u64 *buf)
9095 {
9096         struct bnx2x *bp = netdev_priv(dev);
9097         u32 *hw_stats = (u32 *)&bp->eth_stats;
9098         int i, j;
9099
9100         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9101                 if (IS_NOT_E1HMF_STAT(bp, i))
9102                         continue;
9103
9104                 if (bnx2x_stats_arr[i].size == 0) {
9105                         /* skip this counter */
9106                         buf[j] = 0;
9107                         j++;
9108                         continue;
9109                 }
9110                 if (bnx2x_stats_arr[i].size == 4) {
9111                         /* 4-byte counter */
9112                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9113                         j++;
9114                         continue;
9115                 }
9116                 /* 8-byte counter */
9117                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9118                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9119                 j++;
9120         }
9121 }
9122
9123 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9124 {
9125         struct bnx2x *bp = netdev_priv(dev);
9126         int port = BP_PORT(bp);
9127         int i;
9128
9129         if (!netif_running(dev))
9130                 return 0;
9131
9132         if (!bp->port.pmf)
9133                 return 0;
9134
9135         if (data == 0)
9136                 data = 2;
9137
9138         for (i = 0; i < (data * 2); i++) {
9139                 if ((i % 2) == 0)
9140                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9141                                       bp->link_params.hw_led_mode,
9142                                       bp->link_params.chip_id);
9143                 else
9144                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9145                                       bp->link_params.hw_led_mode,
9146                                       bp->link_params.chip_id);
9147
9148                 msleep_interruptible(500);
9149                 if (signal_pending(current))
9150                         break;
9151         }
9152
9153         if (bp->link_vars.link_up)
9154                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9155                               bp->link_vars.line_speed,
9156                               bp->link_params.hw_led_mode,
9157                               bp->link_params.chip_id);
9158
9159         return 0;
9160 }
9161
9162 static struct ethtool_ops bnx2x_ethtool_ops = {
9163         .get_settings           = bnx2x_get_settings,
9164         .set_settings           = bnx2x_set_settings,
9165         .get_drvinfo            = bnx2x_get_drvinfo,
9166         .get_wol                = bnx2x_get_wol,
9167         .set_wol                = bnx2x_set_wol,
9168         .get_msglevel           = bnx2x_get_msglevel,
9169         .set_msglevel           = bnx2x_set_msglevel,
9170         .nway_reset             = bnx2x_nway_reset,
9171         .get_link               = ethtool_op_get_link,
9172         .get_eeprom_len         = bnx2x_get_eeprom_len,
9173         .get_eeprom             = bnx2x_get_eeprom,
9174         .set_eeprom             = bnx2x_set_eeprom,
9175         .get_coalesce           = bnx2x_get_coalesce,
9176         .set_coalesce           = bnx2x_set_coalesce,
9177         .get_ringparam          = bnx2x_get_ringparam,
9178         .set_ringparam          = bnx2x_set_ringparam,
9179         .get_pauseparam         = bnx2x_get_pauseparam,
9180         .set_pauseparam         = bnx2x_set_pauseparam,
9181         .get_rx_csum            = bnx2x_get_rx_csum,
9182         .set_rx_csum            = bnx2x_set_rx_csum,
9183         .get_tx_csum            = ethtool_op_get_tx_csum,
9184         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9185         .set_flags              = bnx2x_set_flags,
9186         .get_flags              = ethtool_op_get_flags,
9187         .get_sg                 = ethtool_op_get_sg,
9188         .set_sg                 = ethtool_op_set_sg,
9189         .get_tso                = ethtool_op_get_tso,
9190         .set_tso                = bnx2x_set_tso,
9191         .self_test_count        = bnx2x_self_test_count,
9192         .self_test              = bnx2x_self_test,
9193         .get_strings            = bnx2x_get_strings,
9194         .phys_id                = bnx2x_phys_id,
9195         .get_stats_count        = bnx2x_get_stats_count,
9196         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9197 };
9198
9199 /* end of ethtool_ops */
9200
9201 /****************************************************************************
9202 * General service functions
9203 ****************************************************************************/
9204
9205 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9206 {
9207         u16 pmcsr;
9208
9209         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9210
9211         switch (state) {
9212         case PCI_D0:
9213                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9214                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9215                                        PCI_PM_CTRL_PME_STATUS));
9216
9217                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9218                         /* delay required during transition out of D3hot */
9219                         msleep(20);
9220                 break;
9221
9222         case PCI_D3hot:
9223                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9224                 pmcsr |= 3;
9225
9226                 if (bp->wol)
9227                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9228
9229                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9230                                       pmcsr);
9231
9232                 /* No more memory access after this point until
9233                  * the device is brought back to D0.
9234                  */
9235                 break;
9236
9237         default:
9238                 return -EINVAL;
9239         }
9240         return 0;
9241 }
9242
9243 /*
9244  * net_device service functions
9245  */
9246
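/* NAPI poll: update the fast-path status block index, service Tx
 * completions and up to budget Rx packets, and re-enable the IGU
 * interrupt only once both rings are drained and the budget was not
 * exhausted.
 */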
9247 static int bnx2x_poll(struct napi_struct *napi, int budget)
9248 {
9249         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9250                                                  napi);
9251         struct bnx2x *bp = fp->bp;
9252         int work_done = 0;
9253
9254 #ifdef BNX2X_STOP_ON_ERROR
9255         if (unlikely(bp->panic))
9256                 goto poll_panic;
9257 #endif
9258
9259         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9260         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9261         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9262
9263         bnx2x_update_fpsb_idx(fp);
9264
9265         if (BNX2X_HAS_TX_WORK(fp))
9266                 bnx2x_tx_int(fp, budget);
9267
9268         if (BNX2X_HAS_RX_WORK(fp))
9269                 work_done = bnx2x_rx_int(fp, budget);
9270
9271         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9272
9273         /* must not complete if we consumed full budget */
9274         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9275
9276 #ifdef BNX2X_STOP_ON_ERROR
9277 poll_panic:
9278 #endif
9279                 netif_rx_complete(bp->dev, napi);
9280
9281                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9282                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9283                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9284                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9285         }
9286         return work_done;
9287 }
9288
9289
9290 /* We split the first BD into a headers BD and a data BD
9291  * to ease the pain of our fellow microcode engineers;
9292  * we use one mapping for both BDs.
9293  * So far this has only been observed to happen
9294  * in Other Operating Systems(TM).
9295  */
9296 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9297                                    struct bnx2x_fastpath *fp,
9298                                    struct eth_tx_bd **tx_bd, u16 hlen,
9299                                    u16 bd_prod, int nbd)
9300 {
9301         struct eth_tx_bd *h_tx_bd = *tx_bd;
9302         struct eth_tx_bd *d_tx_bd;
9303         dma_addr_t mapping;
9304         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9305
9306         /* first fix first BD */
9307         h_tx_bd->nbd = cpu_to_le16(nbd);
9308         h_tx_bd->nbytes = cpu_to_le16(hlen);
9309
9310         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9311            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9312            h_tx_bd->addr_lo, h_tx_bd->nbd);
9313
9314         /* now get a new data BD
9315          * (after the pbd) and fill it */
9316         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9317         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9318
9319         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9320                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9321
9322         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9323         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9324         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9325         d_tx_bd->vlan = 0;
9326         /* this marks the BD as one that has no individual mapping;
9327          * the FW ignores this flag in a BD not marked start
9328          */
9329         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9330         DP(NETIF_MSG_TX_QUEUED,
9331            "TSO split data size is %d (%x:%x)\n",
9332            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9333
9334         /* update tx_bd for marking the last BD flag */
9335         *tx_bd = d_tx_bd;
9336
9337         return bd_prod;
9338 }
9339
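/* Adjust a partial checksum when the hardware parses the packet from a
 * different offset than the stack used: fix > 0 subtracts the
 * contribution of the fix bytes preceding t_header, fix < 0 adds the
 * -fix bytes starting at it.  The folded, complemented result is
 * returned byte swapped.
 */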
9340 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9341 {
9342         if (fix > 0)
9343                 csum = (u16) ~csum_fold(csum_sub(csum,
9344                                 csum_partial(t_header - fix, fix, 0)));
9345
9346         else if (fix < 0)
9347                 csum = (u16) ~csum_fold(csum_add(csum,
9348                                 csum_partial(t_header, -fix, 0)));
9349
9350         return swab16(csum);
9351 }
9352
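/* Classify an outgoing skb into XMIT_* flags (plain, IPv4/IPv6
 * checksum, TCP checksum, GSO v4/v6) that drive how the Tx BDs are
 * built.
 */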
9353 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9354 {
9355         u32 rc;
9356
9357         if (skb->ip_summed != CHECKSUM_PARTIAL)
9358                 rc = XMIT_PLAIN;
9359
9360         else {
9361                 if (skb->protocol == htons(ETH_P_IPV6)) {
9362                         rc = XMIT_CSUM_V6;
9363                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9364                                 rc |= XMIT_CSUM_TCP;
9365
9366                 } else {
9367                         rc = XMIT_CSUM_V4;
9368                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9369                                 rc |= XMIT_CSUM_TCP;
9370                 }
9371         }
9372
9373         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9374                 rc |= XMIT_GSO_V4;
9375
9376         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9377                 rc |= XMIT_GSO_V6;
9378
9379         return rc;
9380 }
9381
9382 /* check if packet requires linearization (packet is too fragmented) */
9383 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9384                              u32 xmit_type)
9385 {
9386         int to_copy = 0;
9387         int hlen = 0;
9388         int first_bd_sz = 0;
9389
9390         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9391         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9392
9393                 if (xmit_type & XMIT_GSO) {
9394                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9395                         /* Check if LSO packet needs to be copied:
9396                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9397                         int wnd_size = MAX_FETCH_BD - 3;
9398                         /* Number of windows to check */
9399                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9400                         int wnd_idx = 0;
9401                         int frag_idx = 0;
9402                         u32 wnd_sum = 0;
9403
9404                         /* Headers length */
9405                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9406                                 tcp_hdrlen(skb);
9407
9408                         /* Amount of data (w/o headers) on the linear part of the SKB */
9409                         first_bd_sz = skb_headlen(skb) - hlen;
9410
9411                         wnd_sum  = first_bd_sz;
9412
9413                         /* Calculate the first sum - it's special */
9414                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9415                                 wnd_sum +=
9416                                         skb_shinfo(skb)->frags[frag_idx].size;
9417
9418                         /* If there was data in the linear part of the skb - check it */
9419                         if (first_bd_sz > 0) {
9420                                 if (unlikely(wnd_sum < lso_mss)) {
9421                                         to_copy = 1;
9422                                         goto exit_lbl;
9423                                 }
9424
9425                                 wnd_sum -= first_bd_sz;
9426                         }
9427
9428                         /* Others are easier: run through the frag list and
9429                            check all windows */
9430                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9431                                 wnd_sum +=
9432                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9433
9434                                 if (unlikely(wnd_sum < lso_mss)) {
9435                                         to_copy = 1;
9436                                         break;
9437                                 }
9438                                 wnd_sum -=
9439                                         skb_shinfo(skb)->frags[wnd_idx].size;
9440                         }
9441
9442                 } else {
9443                         /* a non-LSO packet that is too fragmented must
9444                            always be linearized */
9445                         to_copy = 1;
9446                 }
9447         }
9448
9449 exit_lbl:
9450         if (unlikely(to_copy))
9451                 DP(NETIF_MSG_TX_QUEUED,
9452                    "Linearization IS REQUIRED for %s packet. "
9453                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9454                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9455                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9456
9457         return to_copy;
9458 }
9459
9460 /* called with netif_tx_lock
9461  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9462  * netif_wake_queue()
9463  */
9464 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9465 {
9466         struct bnx2x *bp = netdev_priv(dev);
9467         struct bnx2x_fastpath *fp;
9468         struct sw_tx_bd *tx_buf;
9469         struct eth_tx_bd *tx_bd;
9470         struct eth_tx_parse_bd *pbd = NULL;
9471         u16 pkt_prod, bd_prod;
9472         int nbd, fp_index;
9473         dma_addr_t mapping;
9474         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9475         int vlan_off = (bp->e1hov ? 4 : 0);
9476         int i;
9477         u8 hlen = 0;
9478
9479 #ifdef BNX2X_STOP_ON_ERROR
9480         if (unlikely(bp->panic))
9481                 return NETDEV_TX_BUSY;
9482 #endif
9483
9484         fp_index = (smp_processor_id() % bp->num_queues);
9485         fp = &bp->fp[fp_index];
9486
9487         if (unlikely(bnx2x_tx_avail(fp) <
9488                                         (skb_shinfo(skb)->nr_frags + 3))) {
9489                 bp->eth_stats.driver_xoff++;
9490                 netif_stop_queue(dev);
9491                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9492                 return NETDEV_TX_BUSY;
9493         }
9494
9495         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9496            "  gso type %x  xmit_type %x\n",
9497            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9498            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9499
9500         /* First, check if we need to linearize the skb
9501            (due to FW restrictions) */
9502         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9503                 /* Statistics of linearization */
9504                 bp->lin_cnt++;
9505                 if (skb_linearize(skb) != 0) {
9506                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9507                            "silently dropping this SKB\n");
9508                         dev_kfree_skb_any(skb);
9509                         return NETDEV_TX_OK;
9510                 }
9511         }
9512
9513         /*
9514          * Please read carefully.  First we use one BD which we mark as
9515          * start, then for TSO or xsum we have a parsing info BD,
9516          * and only then we have the rest of the TSO BDs.
9517          * (Don't forget to mark the last one as last,
9518          * and to unmap only AFTER you write to the BD ...)
9519          * And above all, all pbd sizes are in words - NOT DWORDS!
9520          */
9521
9522         pkt_prod = fp->tx_pkt_prod++;
9523         bd_prod = TX_BD(fp->tx_bd_prod);
9524
9525         /* get a tx_buf and first BD */
9526         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9527         tx_bd = &fp->tx_desc_ring[bd_prod];
9528
9529         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9530         tx_bd->general_data = (UNICAST_ADDRESS <<
9531                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9532         /* header nbd */
9533         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9534
9535         /* remember the first BD of the packet */
9536         tx_buf->first_bd = fp->tx_bd_prod;
9537         tx_buf->skb = skb;
9538
9539         DP(NETIF_MSG_TX_QUEUED,
9540            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9541            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9542
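        /* When no VLAN tag is sent, the vlan field carries pkt_prod instead
         * (as it does in every frag BD below) - seemingly FW bookkeeping
         * rather than a real tag.
         */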
9543         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9544                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9545                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9546                 vlan_off += 4;
9547         } else
9548                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9549
9550         if (xmit_type) {
9551
9552                 /* turn on parsing and get a BD */
9553                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9554                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9555
9556                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9557         }
9558
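        /* Note: the parsing BD keeps all header lengths in 16-bit words
         * (see the comment above); hlen is converted back to bytes once
         * total_hlen has been written.
         */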
9559         if (xmit_type & XMIT_CSUM) {
9560                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9561
9562                 /* for now NS flag is not used in Linux */
9563                 pbd->global_data = (hlen |
9564                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9565                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9566
9567                 pbd->ip_hlen = (skb_transport_header(skb) -
9568                                 skb_network_header(skb)) / 2;
9569
9570                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9571
9572                 pbd->total_hlen = cpu_to_le16(hlen);
9573                 hlen = hlen*2 - vlan_off;
9574
9575                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9576
9577                 if (xmit_type & XMIT_CSUM_V4)
9578                         tx_bd->bd_flags.as_bitfield |=
9579                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9580                 else
9581                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9582
9583                 if (xmit_type & XMIT_CSUM_TCP) {
9584                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9585
9586                 } else {
9587                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9588
9589                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9590                         pbd->cs_offset = fix / 2;
9591
9592                         DP(NETIF_MSG_TX_QUEUED,
9593                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9594                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9595                            SKB_CS(skb));
9596
9597                         /* HW bug: fixup the CSUM */
9598                         pbd->tcp_pseudo_csum =
9599                                 bnx2x_csum_fix(skb_transport_header(skb),
9600                                                SKB_CS(skb), fix);
9601
9602                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9603                            pbd->tcp_pseudo_csum);
9604                 }
9605         }
9606
9607         mapping = pci_map_single(bp->pdev, skb->data,
9608                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9609
9610         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9611         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
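        /* BDs so far: one for the linear data plus the parsing BD when
         * present; the frag BDs are added in the loop further down.
         */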
9612         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9613         tx_bd->nbd = cpu_to_le16(nbd);
9614         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9615
9616         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9617            "  nbytes %d  flags %x  vlan %x\n",
9618            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9619            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9620            le16_to_cpu(tx_bd->vlan));
9621
9622         if (xmit_type & XMIT_GSO) {
9623
9624                 DP(NETIF_MSG_TX_QUEUED,
9625                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9626                    skb->len, hlen, skb_headlen(skb),
9627                    skb_shinfo(skb)->gso_size);
9628
9629                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9630
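                /* If the linear part carries payload beyond the headers,
                 * split the headers into a BD of their own (presumably a
                 * FW requirement for LSO), bumping nbd for the extra BD.
                 */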
9631                 if (unlikely(skb_headlen(skb) > hlen))
9632                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9633                                                  bd_prod, ++nbd);
9634
9635                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9636                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9637                 pbd->tcp_flags = pbd_tcp_flags(skb);
9638
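                /* Seed the pseudo-header checksum with a zero length; the
                 * FW patches in per-segment lengths, which is why
                 * PSEUDO_CS_WITHOUT_LEN is set below.
                 */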
9639                 if (xmit_type & XMIT_GSO_V4) {
9640                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9641                         pbd->tcp_pseudo_csum =
9642                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9643                                                           ip_hdr(skb)->daddr,
9644                                                           0, IPPROTO_TCP, 0));
9645
9646                 } else
9647                         pbd->tcp_pseudo_csum =
9648                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9649                                                         &ipv6_hdr(skb)->daddr,
9650                                                         0, IPPROTO_TCP, 0));
9651
9652                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9653         }
9654
9655         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9656                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9657
9658                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9659                 tx_bd = &fp->tx_desc_ring[bd_prod];
9660
9661                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9662                                        frag->size, PCI_DMA_TODEVICE);
9663
9664                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9665                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9666                 tx_bd->nbytes = cpu_to_le16(frag->size);
9667                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9668                 tx_bd->bd_flags.as_bitfield = 0;
9669
9670                 DP(NETIF_MSG_TX_QUEUED,
9671                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9672                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9673                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9674         }
9675
9676         /* now at last mark the BD as the last BD */
9677         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9678
9679         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9680            tx_bd, tx_bd->bd_flags.as_bitfield);
9681
9682         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9683
9684         /* now send a tx doorbell, also counting the next-page BD
9685          * as consumed if this packet wrapped or ended on a page boundary
9686          */
9687         if (TX_BD_POFF(bd_prod) < nbd)
9688                 nbd++;
9689
9690         if (pbd)
9691                 DP(NETIF_MSG_TX_QUEUED,
9692                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9693                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9694                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9695                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9696                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9697
9698         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9699
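        /* Publish the new BD and packet producers where the FW reads them,
         * then ring the doorbell; the mb() below enforces the FW-required
         * store ordering between the two producer updates.
         */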
9700         fp->hw_tx_prods->bds_prod =
9701                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9702         mb(); /* FW restriction: must not reorder writing nbd and packets */
9703         fp->hw_tx_prods->packets_prod =
9704                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9705         DOORBELL(bp, FP_IDX(fp), 0);
9706
9707         mmiowb();
9708
9709         fp->tx_bd_prod += nbd;
9710         dev->trans_start = jiffies;
9711
9712         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9713                 netif_stop_queue(dev);
                /* make the stop visible before re-checking the ring, so a
                 * concurrent bnx2x_tx_int() cannot miss the wake */
                smp_mb();
9714                 bp->eth_stats.driver_xoff++;
9715                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9716                         netif_wake_queue(dev);
9717         }
9718         fp->tx_pkt++;
9719
9720         return NETDEV_TX_OK;
9721 }
9722
9723 /* called with rtnl_lock */
9724 static int bnx2x_open(struct net_device *dev)
9725 {
9726         struct bnx2x *bp = netdev_priv(dev);
9727
9728         bnx2x_set_power_state(bp, PCI_D0);
9729
9730         return bnx2x_nic_load(bp, LOAD_OPEN);
9731 }
9732
9733 /* called with rtnl_lock */
9734 static int bnx2x_close(struct net_device *dev)
9735 {
9736         struct bnx2x *bp = netdev_priv(dev);
9737
9738         /* Unload the driver, release IRQs */
9739         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9740         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9741                 if (!CHIP_REV_IS_SLOW(bp))
9742                         bnx2x_set_power_state(bp, PCI_D3hot);
9743
9744         return 0;
9745 }
9746
9747 /* called with netif_tx_lock from set_multicast */
9748 static void bnx2x_set_rx_mode(struct net_device *dev)
9749 {
9750         struct bnx2x *bp = netdev_priv(dev);
9751         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9752         int port = BP_PORT(bp);
9753
9754         if (bp->state != BNX2X_STATE_OPEN) {
9755                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9756                 return;
9757         }
9758
9759         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9760
9761         if (dev->flags & IFF_PROMISC)
9762                 rx_mode = BNX2X_RX_MODE_PROMISC;
9763
9764         else if ((dev->flags & IFF_ALLMULTI) ||
9765                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9766                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9767
9768         else { /* some multicasts */
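                /* E1 programs explicit CAM entries through a ramrod;
                 * E1H instead uses the 256-bit hash filter written
                 * directly to the MC_HASH registers below.
                 */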
9769                 if (CHIP_IS_E1(bp)) {
9770                         int i, old, offset;
9771                         struct dev_mc_list *mclist;
9772                         struct mac_configuration_cmd *config =
9773                                                 bnx2x_sp(bp, mcast_config);
9774
9775                         for (i = 0, mclist = dev->mc_list;
9776                              mclist && (i < dev->mc_count);
9777                              i++, mclist = mclist->next) {
9778
9779                                 config->config_table[i].
9780                                         cam_entry.msb_mac_addr =
9781                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9782                                 config->config_table[i].
9783                                         cam_entry.middle_mac_addr =
9784                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9785                                 config->config_table[i].
9786                                         cam_entry.lsb_mac_addr =
9787                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9788                                 config->config_table[i].cam_entry.flags =
9789                                                         cpu_to_le16(port);
9790                                 config->config_table[i].
9791                                         target_table_entry.flags = 0;
9792                                 config->config_table[i].
9793                                         target_table_entry.client_id = 0;
9794                                 config->config_table[i].
9795                                         target_table_entry.vlan_id = 0;
9796
9797                                 DP(NETIF_MSG_IFUP,
9798                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9799                                    config->config_table[i].
9800                                                 cam_entry.msb_mac_addr,
9801                                    config->config_table[i].
9802                                                 cam_entry.middle_mac_addr,
9803                                    config->config_table[i].
9804                                                 cam_entry.lsb_mac_addr);
9805                         }
9806                         old = config->hdr.length_6b;
9807                         if (old > i) {
9808                                 for (; i < old; i++) {
9809                                         if (CAM_IS_INVALID(config->
9810                                                            config_table[i])) {
9811                                                 i--; /* already invalidated */
9812                                                 break;
9813                                         }
9814                                         /* invalidate */
9815                                         CAM_INVALIDATE(config->
9816                                                        config_table[i]);
9817                                 }
9818                         }
9819
9820                         if (CHIP_REV_IS_SLOW(bp))
9821                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9822                         else
9823                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9824
9825                         config->hdr.length_6b = i;
9826                         config->hdr.offset = offset;
9827                         config->hdr.client_id = BP_CL_ID(bp);
9828                         config->hdr.reserved1 = 0;
9829
9830                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9831                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9832                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9833                                       0);
9834                 } else { /* E1H */
9835                         /* Accept one or more multicasts */
9836                         struct dev_mc_list *mclist;
9837                         u32 mc_filter[MC_HASH_SIZE];
9838                         u32 crc, bit, regidx;
9839                         int i;
9840
9841                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9842
9843                         for (i = 0, mclist = dev->mc_list;
9844                              mclist && (i < dev->mc_count);
9845                              i++, mclist = mclist->next) {
9846
9847                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9848                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9849                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9850                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9851                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9852
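                                /* Approximate-multicast hash: CRC32C the
                                 * MAC, use the top 8 bits as an index into
                                 * 256 filter bits spread across MC_HASH_SIZE
                                 * 32-bit registers.
                                 */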
9853                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9854                                 bit = (crc >> 24) & 0xff;
9855                                 regidx = bit >> 5;
9856                                 bit &= 0x1f;
9857                                 mc_filter[regidx] |= (1 << bit);
9858                         }
9859
9860                         for (i = 0; i < MC_HASH_SIZE; i++)
9861                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9862                                        mc_filter[i]);
9863                 }
9864         }
9865
9866         bp->rx_mode = rx_mode;
9867         bnx2x_set_storm_rx_mode(bp);
9868 }
9869
9870 /* called with rtnl_lock */
9871 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9872 {
9873         struct sockaddr *addr = p;
9874         struct bnx2x *bp = netdev_priv(dev);
9875
9876         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9877                 return -EINVAL;
9878
9879         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9880         if (netif_running(dev)) {
9881                 if (CHIP_IS_E1(bp))
9882                         bnx2x_set_mac_addr_e1(bp, 1);
9883                 else
9884                         bnx2x_set_mac_addr_e1h(bp, 1);
9885         }
9886
9887         return 0;
9888 }
9889
9890 /* called with rtnl_lock */
9891 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9892 {
9893         struct mii_ioctl_data *data = if_mii(ifr);
9894         struct bnx2x *bp = netdev_priv(dev);
9895         int port = BP_PORT(bp);
9896         int err;
9897
9898         switch (cmd) {
9899         case SIOCGMIIPHY:
9900                 data->phy_id = bp->port.phy_addr;
9901
9902                 /* fallthrough */
9903
9904         case SIOCGMIIREG: {
9905                 u16 mii_regval;
9906
9907                 if (!netif_running(dev))
9908                         return -EAGAIN;
9909
9910                 mutex_lock(&bp->port.phy_mutex);
9911                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9912                                       DEFAULT_PHY_DEV_ADDR,
9913                                       (data->reg_num & 0x1f), &mii_regval);
9914                 data->val_out = mii_regval;
9915                 mutex_unlock(&bp->port.phy_mutex);
9916                 return err;
9917         }
9918
9919         case SIOCSMIIREG:
9920                 if (!capable(CAP_NET_ADMIN))
9921                         return -EPERM;
9922
9923                 if (!netif_running(dev))
9924                         return -EAGAIN;
9925
9926                 mutex_lock(&bp->port.phy_mutex);
9927                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9928                                        DEFAULT_PHY_DEV_ADDR,
9929                                        (data->reg_num & 0x1f), data->val_in);
9930                 mutex_unlock(&bp->port.phy_mutex);
9931                 return err;
9932
9933         default:
9934                 /* do nothing */
9935                 break;
9936         }
9937
9938         return -EOPNOTSUPP;
9939 }
9940
9941 /* called with rtnl_lock */
9942 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9943 {
9944         struct bnx2x *bp = netdev_priv(dev);
9945         int rc = 0;
9946
9947         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9948             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9949                 return -EINVAL;
9950
9951         /* This does not race with packet allocation
9952          * because the actual alloc size is
9953          * only updated as part of load
9954          */
9955         dev->mtu = new_mtu;
9956
9957         if (netif_running(dev)) {
9958                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9959                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9960         }
9961
9962         return rc;
9963 }
9964
9965 static void bnx2x_tx_timeout(struct net_device *dev)
9966 {
9967         struct bnx2x *bp = netdev_priv(dev);
9968
9969 #ifdef BNX2X_STOP_ON_ERROR
9970         if (!bp->panic)
9971                 bnx2x_panic();
9972 #endif
9973         /* This allows the netif to be shut down gracefully before resetting */
9974         schedule_work(&bp->reset_task);
9975 }
9976
9977 #ifdef BCM_VLAN
9978 /* called with rtnl_lock */
9979 static void bnx2x_vlan_rx_register(struct net_device *dev,
9980                                    struct vlan_group *vlgrp)
9981 {
9982         struct bnx2x *bp = netdev_priv(dev);
9983
9984         bp->vlgrp = vlgrp;
9985         if (netif_running(dev))
9986                 bnx2x_set_client_config(bp);
9987 }
9988
9989 #endif
9990
9991 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
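/* netpoll entry point: with the device IRQ masked, run the interrupt
 * handler by hand so netconsole and friends can make progress when
 * normal interrupt delivery cannot.
 */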
9992 static void poll_bnx2x(struct net_device *dev)
9993 {
9994         struct bnx2x *bp = netdev_priv(dev);
9995
9996         disable_irq(bp->pdev->irq);
9997         bnx2x_interrupt(bp->pdev->irq, dev);
9998         enable_irq(bp->pdev->irq);
9999 }
10000 #endif
10001
10002 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10003                                     struct net_device *dev)
10004 {
10005         struct bnx2x *bp;
10006         int rc;
10007
10008         SET_NETDEV_DEV(dev, &pdev->dev);
10009         bp = netdev_priv(dev);
10010
10011         bp->dev = dev;
10012         bp->pdev = pdev;
10013         bp->flags = 0;
10014         bp->func = PCI_FUNC(pdev->devfn);
10015
10016         rc = pci_enable_device(pdev);
10017         if (rc) {
10018                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10019                 goto err_out;
10020         }
10021
10022         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10023                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10024                        " aborting\n");
10025                 rc = -ENODEV;
10026                 goto err_out_disable;
10027         }
10028
10029         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10030                 printk(KERN_ERR PFX "Cannot find second PCI device"
10031                        " base address, aborting\n");
10032                 rc = -ENODEV;
10033                 goto err_out_disable;
10034         }
10035
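        /* Request regions only on the first enable of this function;
         * a higher enable_cnt apparently means another user already
         * holds them (mirrored by the release paths below).
         */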
10036         if (atomic_read(&pdev->enable_cnt) == 1) {
10037                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10038                 if (rc) {
10039                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10040                                " aborting\n");
10041                         goto err_out_disable;
10042                 }
10043
10044                 pci_set_master(pdev);
10045                 pci_save_state(pdev);
10046         }
10047
10048         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10049         if (bp->pm_cap == 0) {
10050                 printk(KERN_ERR PFX "Cannot find power management"
10051                        " capability, aborting\n");
10052                 rc = -EIO;
10053                 goto err_out_release;
10054         }
10055
10056         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10057         if (bp->pcie_cap == 0) {
10058                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10059                        " aborting\n");
10060                 rc = -EIO;
10061                 goto err_out_release;
10062         }
10063
10064         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10065                 bp->flags |= USING_DAC_FLAG;
10066                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10067                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10068                                " failed, aborting\n");
10069                         rc = -EIO;
10070                         goto err_out_release;
10071                 }
10072
10073         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10074                 printk(KERN_ERR PFX "System does not support DMA,"
10075                        " aborting\n");
10076                 rc = -EIO;
10077                 goto err_out_release;
10078         }
10079
10080         dev->mem_start = pci_resource_start(pdev, 0);
10081         dev->base_addr = dev->mem_start;
10082         dev->mem_end = pci_resource_end(pdev, 0);
10083
10084         dev->irq = pdev->irq;
10085
10086         bp->regview = ioremap_nocache(dev->base_addr,
10087                                       pci_resource_len(pdev, 0));
10088         if (!bp->regview) {
10089                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10090                 rc = -ENOMEM;
10091                 goto err_out_release;
10092         }
10093
10094         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10095                                         min_t(u64, BNX2X_DB_SIZE,
10096                                               pci_resource_len(pdev, 2)));
10097         if (!bp->doorbells) {
10098                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10099                 rc = -ENOMEM;
10100                 goto err_out_unmap;
10101         }
10102
10103         bnx2x_set_power_state(bp, PCI_D0);
10104
10105         /* clean indirect addresses */
10106         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10107                                PCICFG_VENDOR_ID_OFFSET);
10108         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10109         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10110         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10111         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10112
10113         dev->hard_start_xmit = bnx2x_start_xmit;
10114         dev->watchdog_timeo = TX_TIMEOUT;
10115
10116         dev->ethtool_ops = &bnx2x_ethtool_ops;
10117         dev->open = bnx2x_open;
10118         dev->stop = bnx2x_close;
10119         dev->set_multicast_list = bnx2x_set_rx_mode;
10120         dev->set_mac_address = bnx2x_change_mac_addr;
10121         dev->do_ioctl = bnx2x_ioctl;
10122         dev->change_mtu = bnx2x_change_mtu;
10123         dev->tx_timeout = bnx2x_tx_timeout;
10124 #ifdef BCM_VLAN
10125         dev->vlan_rx_register = bnx2x_vlan_rx_register;
10126 #endif
10127 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10128         dev->poll_controller = poll_bnx2x;
10129 #endif
10130         dev->features |= NETIF_F_SG;
10131         dev->features |= NETIF_F_HW_CSUM;
10132         if (bp->flags & USING_DAC_FLAG)
10133                 dev->features |= NETIF_F_HIGHDMA;
10134 #ifdef BCM_VLAN
10135         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10136 #endif
10137         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10138         dev->features |= NETIF_F_TSO6;
10139
10140         return 0;
10141
10142 err_out_unmap:
10143         if (bp->regview) {
10144                 iounmap(bp->regview);
10145                 bp->regview = NULL;
10146         }
10147         if (bp->doorbells) {
10148                 iounmap(bp->doorbells);
10149                 bp->doorbells = NULL;
10150         }
10151
10152 err_out_release:
10153         if (atomic_read(&pdev->enable_cnt) == 1)
10154                 pci_release_regions(pdev);
10155
10156 err_out_disable:
10157         pci_disable_device(pdev);
10158         pci_set_drvdata(pdev, NULL);
10159
10160 err_out:
10161         return rc;
10162 }
10163
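/* Negotiated PCIe link width in lanes, read from the link control/status
 * register through the PCICFG window.
 */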
10164 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10165 {
10166         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10167
10168         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10169         return val;
10170 }
10171
10172 /* return value of 1=2.5GHz 2=5GHz */
10173 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10174 {
10175         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10176
10177         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10178         return val;
10179 }
10180
10181 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10182                                     const struct pci_device_id *ent)
10183 {
10184         static int version_printed;
10185         struct net_device *dev = NULL;
10186         struct bnx2x *bp;
10187         int rc;
10188         DECLARE_MAC_BUF(mac);
10189
10190         if (version_printed++ == 0)
10191                 printk(KERN_INFO "%s", version);
10192
10193         /* dev zeroed in alloc_etherdev */
10194         dev = alloc_etherdev(sizeof(*bp));
10195         if (!dev) {
10196                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10197                 return -ENOMEM;
10198         }
10199
10200         netif_carrier_off(dev);
10201
10202         bp = netdev_priv(dev);
10203         bp->msglevel = debug;
10204
10205         rc = bnx2x_init_dev(pdev, dev);
10206         if (rc < 0) {
10207                 free_netdev(dev);
10208                 return rc;
10209         }
10210
10211         rc = register_netdev(dev);
10212         if (rc) {
10213                 dev_err(&pdev->dev, "Cannot register net device\n");
10214                 goto init_one_exit;
10215         }
10216
10217         pci_set_drvdata(pdev, dev);
10218
10219         rc = bnx2x_init_bp(bp);
10220         if (rc) {
10221                 unregister_netdev(dev);
10222                 goto init_one_exit;
10223         }
10224
10225         bp->common.name = board_info[ent->driver_data].name;
10226         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10227                " IRQ %d, ", dev->name, bp->common.name,
10228                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10229                bnx2x_get_pcie_width(bp),
10230                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10231                dev->base_addr, bp->pdev->irq);
10232         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10233         return 0;
10234
10235 init_one_exit:
10236         if (bp->regview)
10237                 iounmap(bp->regview);
10238
10239         if (bp->doorbells)
10240                 iounmap(bp->doorbells);
10241
10242         free_netdev(dev);
10243
10244         if (atomic_read(&pdev->enable_cnt) == 1)
10245                 pci_release_regions(pdev);
10246
10247         pci_disable_device(pdev);
10248         pci_set_drvdata(pdev, NULL);
10249
10250         return rc;
10251 }
10252
10253 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10254 {
10255         struct net_device *dev = pci_get_drvdata(pdev);
10256         struct bnx2x *bp;
10257
10258         if (!dev) {
10259                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10260                 return;
10261         }
10262         bp = netdev_priv(dev);
10263
10264         unregister_netdev(dev);
10265
10266         if (bp->regview)
10267                 iounmap(bp->regview);
10268
10269         if (bp->doorbells)
10270                 iounmap(bp->doorbells);
10271
10272         free_netdev(dev);
10273
10274         if (atomic_read(&pdev->enable_cnt) == 1)
10275                 pci_release_regions(pdev);
10276
10277         pci_disable_device(pdev);
10278         pci_set_drvdata(pdev, NULL);
10279 }
10280
10281 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10282 {
10283         struct net_device *dev = pci_get_drvdata(pdev);
10284         struct bnx2x *bp;
10285
10286         if (!dev) {
10287                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10288                 return -ENODEV;
10289         }
10290         bp = netdev_priv(dev);
10291
10292         rtnl_lock();
10293
10294         pci_save_state(pdev);
10295
10296         if (!netif_running(dev)) {
10297                 rtnl_unlock();
10298                 return 0;
10299         }
10300
10301         netif_device_detach(dev);
10302
10303         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10304
10305         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10306
10307         rtnl_unlock();
10308
10309         return 0;
10310 }
10311
10312 static int bnx2x_resume(struct pci_dev *pdev)
10313 {
10314         struct net_device *dev = pci_get_drvdata(pdev);
10315         struct bnx2x *bp;
10316         int rc;
10317
10318         if (!dev) {
10319                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10320                 return -ENODEV;
10321         }
10322         bp = netdev_priv(dev);
10323
10324         rtnl_lock();
10325
10326         pci_restore_state(pdev);
10327
10328         if (!netif_running(dev)) {
10329                 rtnl_unlock();
10330                 return 0;
10331         }
10332
10333         bnx2x_set_power_state(bp, PCI_D0);
10334         netif_device_attach(dev);
10335
10336         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10337
10338         rtnl_unlock();
10339
10340         return rc;
10341 }
10342
10343 /**
10344  * bnx2x_io_error_detected - called when PCI error is detected
10345  * @pdev: Pointer to PCI device
10346  * @state: The current pci connection state
10347  *
10348  * This function is called after a PCI bus error affecting
10349  * this device has been detected.
10350  */
10351 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10352                                                 pci_channel_state_t state)
10353 {
10354         struct net_device *dev = pci_get_drvdata(pdev);
10355         struct bnx2x *bp = netdev_priv(dev);
10356
10357         rtnl_lock();
10358
10359         netif_device_detach(dev);
10360
10361         if (netif_running(dev))
10362                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10363
10364         pci_disable_device(pdev);
10365
10366         rtnl_unlock();
10367
10368         /* Request a slot reset */
10369         return PCI_ERS_RESULT_NEED_RESET;
10370 }
10371
10372 /**
10373  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10374  * @pdev: Pointer to PCI device
10375  *
10376  * Restart the card from scratch, as if from a cold-boot.
10377  */
10378 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10379 {
10380         struct net_device *dev = pci_get_drvdata(pdev);
10381         struct bnx2x *bp = netdev_priv(dev);
10382
10383         rtnl_lock();
10384
10385         if (pci_enable_device(pdev)) {
10386                 dev_err(&pdev->dev,
10387                         "Cannot re-enable PCI device after reset\n");
10388                 rtnl_unlock();
10389                 return PCI_ERS_RESULT_DISCONNECT;
10390         }
10391
10392         pci_set_master(pdev);
10393         pci_restore_state(pdev);
10394
10395         if (netif_running(dev))
10396                 bnx2x_set_power_state(bp, PCI_D0);
10397
10398         rtnl_unlock();
10399
10400         return PCI_ERS_RESULT_RECOVERED;
10401 }
10402
10403 /**
10404  * bnx2x_io_resume - called when traffic can start flowing again
10405  * @pdev: Pointer to PCI device
10406  *
10407  * This callback is called when the error recovery driver tells us that
10408  * it's OK to resume normal operation.
10409  */
10410 static void bnx2x_io_resume(struct pci_dev *pdev)
10411 {
10412         struct net_device *dev = pci_get_drvdata(pdev);
10413         struct bnx2x *bp = netdev_priv(dev);
10414
10415         rtnl_lock();
10416
10417         if (netif_running(dev))
10418                 bnx2x_nic_load(bp, LOAD_OPEN);
10419
10420         netif_device_attach(dev);
10421
10422         rtnl_unlock();
10423 }
10424
10425 static struct pci_error_handlers bnx2x_err_handler = {
10426         .error_detected = bnx2x_io_error_detected,
10427         .slot_reset = bnx2x_io_slot_reset,
10428         .resume = bnx2x_io_resume,
10429 };
10430
10431 static struct pci_driver bnx2x_pci_driver = {
10432         .name        = DRV_MODULE_NAME,
10433         .id_table    = bnx2x_pci_tbl,
10434         .probe       = bnx2x_init_one,
10435         .remove      = __devexit_p(bnx2x_remove_one),
10436         .suspend     = bnx2x_suspend,
10437         .resume      = bnx2x_resume,
10438         .err_handler = &bnx2x_err_handler,
10439 };
10440
10441 static int __init bnx2x_init(void)
10442 {
10443         return pci_register_driver(&bnx2x_pci_driver);
10444 }
10445
10446 static void __exit bnx2x_cleanup(void)
10447 {
10448         pci_unregister_driver(&bnx2x_pci_driver);
10449 }
10450
10451 module_init(bnx2x_init);
10452 module_exit(bnx2x_cleanup);
10453