/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.6"
#define DRV_MODULE_RELDATE      "2008/06/23"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
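
/* These indirect accessors drive the GRC address window in PCI config
 * space: program the target address into PCICFG_GRC_ADDRESS, move one
 * dword through PCICFG_GRC_DATA, then park the window back on
 * PCICFG_VENDOR_ID_OFFSET (presumably so a stray config cycle cannot
 * land on device internals).  Used only at init, before DMAE is ready;
 * locking is left to the MCP, as noted above.
 */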

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
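
/* Typical caller pattern (sketch): stage a few dwords in the slowpath
 * write-back buffer and let the DMAE engine copy them to a GRC address,
 * e.g. for a 64-bit register:
 *
 *        wb_write[0] = val_hi;
 *        wb_write[1] = val_lo;
 *        REG_WR_DMAE(bp, reg, wb_write, 2);
 *
 * which is exactly what bnx2x_wb_wr() below does.  Completion is polled
 * through *wb_comp with a 200-iteration bound, stretching the per-step
 * delay from 5us to 100ms on emulation/FPGA (CHIP_REV_IS_SLOW) parts.
 */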

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

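/* Each of the four STORM microcode processors (X/T/C/U) keeps an assert
 * list in its internal memory.  Walk every list until the end-of-list
 * marker (COMMON_ASM_INVALID_ASSERT_OPCODE), print each valid 16-byte
 * entry and return the total number of asserts found.
 */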
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

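/* Dump the MCP scratchpad log: the firmware keeps a circular text buffer
 * in scratch memory whose current mark is read from offset 0xf104; the
 * dump prints from the mark up to the end of the buffer (0xF900), then
 * wraps from 0xF108 back up to the mark, eight dwords at a time as
 * NUL-terminated string chunks.
 */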
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = 0;
                end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

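/* Quiesce interrupts: bump intr_sem first so any ISR that still fires
 * returns immediately, mask the HC, then synchronize_irq() every vector
 * (each fastpath MSI-X vector plus the slowpath one, or the single INTA
 * line) and finally flush the slowpath work item.
 */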
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

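/* Acknowledge a status block: the ack is a single 32-bit IGU command
 * image (SB id, storm id, new consumer index, interrupt-mode op and the
 * update-index flag, packed per struct igu_ack_register) written to
 * this port's HC command register.
 */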
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
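
/* Tx BD chain layout unwound above: the first BD carries the mapped
 * linear part; for checksum/LSO packets it is followed by a parse BD
 * and possibly a TSO split-header BD (neither of which is DMA-mapped),
 * then one mapped BD per page fragment.  nbd counts all BDs after the
 * first one.
 */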

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
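
/* Worked example (sketch): with tx_bd_prod = 100 and tx_bd_cons = 90,
 * used = 10 + NUM_TX_RINGS -- the "next page" BDs are treated as
 * permanently consumed so the availability check in start_xmit never
 * counts a page-link entry as a free slot; the return value is
 * tx_ring_size minus that figure.
 */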

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
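
/* The SGE mask tracks page consumption: one bit per SGE, grouped into
 * 64-bit elements.  Bits are cleared above as the FW reports SGEs
 * consumed, and rx_sge_prod is only advanced across mask elements that
 * have gone fully to zero (each is then reset to all ones), so pages
 * are returned to the chip in whole 64-entry strides.
 */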

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}
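
/* TPA start swaps buffers instead of copying: the spare skb parked in
 * tpa_pool[queue] is mapped and placed at the ring's prod slot, while
 * the skb at cons -- which the chip is now aggregating into -- moves
 * into the pool, still mapped, until the TPA stop CQE arrives.
 */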

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}
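
/* The three rx producers (BD, CQE, SGE) are written to TSTORM internal
 * memory as a single struct image, dword by dword, presumably so the
 * firmware observes one consistent triple for this client rather than
 * three unrelated updates.
 */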

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;
        u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear
1461                                            data on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                                        pad + RX_COPY_THRESH,
1480                                                        PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring,
1494                          * copy small packets if mtu > 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                                  bp->rx_buf_use_size,
1524                                                  PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559                 bp->dev->last_rx = jiffies;
1560
1561 next_rx:
1562                 rx_buf->skb = NULL;
1563
1564                 bd_cons = NEXT_RX_IDX(bd_cons);
1565                 bd_prod = NEXT_RX_IDX(bd_prod);
1566                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567                 rx_pkt++;
1568 next_cqe:
1569                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1571
1572                 if (rx_pkt == budget)
1573                         break;
1574         } /* while */
1575
1576         fp->rx_bd_cons = bd_cons;
1577         fp->rx_bd_prod = bd_prod_fw;
1578         fp->rx_comp_cons = sw_comp_cons;
1579         fp->rx_comp_prod = sw_comp_prod;
1580
1581         /* Update producers */
1582         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583                              fp->rx_sge_prod);
1584         mmiowb(); /* keep prod updates ordered */
1585
1586         fp->rx_pkt += rx_pkt;
1587         fp->rx_calls++;
1588
1589         return rx_pkt;
1590 }
1591
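/* Illustrative sketch (not part of the driver): bnx2x_rx_int() above is
 * a budgeted consumer of a completion ring -- walk the software consumer
 * toward the hardware consumer, stop at the NAPI budget, and report how
 * much work was done.  process_cqe_sketch() is a hypothetical stand-in
 * for the per-CQE handling; the real index advance also skips the
 * "next page" elements (NEXT_RCQ_IDX).
 */
#if 0
#include <stdint.h>

extern void process_cqe_sketch(uint16_t cons);  /* hypothetical */

static int ring_poll_sketch(uint16_t *sw_cons, uint16_t hw_cons, int budget)
{
        int done = 0;

        while (*sw_cons != hw_cons) {
                process_cqe_sketch(*sw_cons);
                (*sw_cons)++;           /* NEXT_RCQ_IDX() in the driver */
                if (++done == budget)
                        break;
        }
        return done;    /* done == budget tells NAPI there may be more work */
}
#endif
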
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1593 {
1594         struct bnx2x_fastpath *fp = fp_cookie;
1595         struct bnx2x *bp = fp->bp;
1596         struct net_device *dev = bp->dev;
1597         int index = FP_IDX(fp);
1598
1599         /* Return here if interrupt is disabled */
1600         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602                 return IRQ_HANDLED;
1603         }
1604
1605         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606            index, FP_SB_ID(fp));
1607         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1608
1609 #ifdef BNX2X_STOP_ON_ERROR
1610         if (unlikely(bp->panic))
1611                 return IRQ_HANDLED;
1612 #endif
1613
1614         prefetch(fp->rx_cons_sb);
1615         prefetch(fp->tx_cons_sb);
1616         prefetch(&fp->status_blk->c_status_block.status_block_index);
1617         prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1620
1621         return IRQ_HANDLED;
1622 }
1623
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625 {
1626         struct net_device *dev = dev_instance;
1627         struct bnx2x *bp = netdev_priv(dev);
1628         u16 status = bnx2x_ack_int(bp);
1629         u16 mask;
1630
1631         /* Return here if interrupt is shared and it's not for us */
1632         if (unlikely(status == 0)) {
1633                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634                 return IRQ_NONE;
1635         }
1636         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1637
1638 #ifdef BNX2X_STOP_ON_ERROR
1639         if (unlikely(bp->panic))
1640                 return IRQ_HANDLED;
1641 #endif
1642
1643         /* Return here if interrupt is disabled */
1644         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1645                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1646                 return IRQ_HANDLED;
1647         }
1648
1649         mask = 0x2 << bp->fp[0].sb_id;
1650         if (status & mask) {
1651                 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653                 prefetch(fp->rx_cons_sb);
1654                 prefetch(fp->tx_cons_sb);
1655                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1659
1660                 status &= ~mask;
1661         }
1662
1663
1664         if (unlikely(status & 0x1)) {
1665                 schedule_work(&bp->sp_task);
1666
1667                 status &= ~0x1;
1668                 if (!status)
1669                         return IRQ_HANDLED;
1670         }
1671
1672         if (status)
1673                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674                    status);
1675
1676         return IRQ_HANDLED;
1677 }
1678
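/* Note on the INT#A dispatch above: in the status bitmap returned by
 * bnx2x_ack_int(), bit 0 belongs to the default (slowpath) status block,
 * while fastpath queue 0 owns bit (sb_id + 1) -- hence
 * "0x2 << bp->fp[0].sb_id".  For sb_id == 0 the fastpath mask is
 * therefore 0x2; any bits still set after both checks are unexpected
 * and only logged.
 */
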
1679 /* end of fast path */
1680
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1682
1683 /* Link */
1684
1685 /*
1686  * General service functions
1687  */
1688
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1690 {
1691         u32 lock_status;
1692         u32 resource_bit = (1 << resource);
1693         int func = BP_FUNC(bp);
1694         u32 hw_lock_control_reg;
1695         int cnt;
1696
1697         /* Validating that the resource is within range */
1698         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699                 DP(NETIF_MSG_HW,
1700                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702                 return -EINVAL;
1703         }
1704
1705         if (func <= 5) {
1706                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707         } else {
1708                 hw_lock_control_reg =
1709                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710         }
1711
1712         /* Validating that the resource is not already taken */
1713         lock_status = REG_RD(bp, hw_lock_control_reg);
1714         if (lock_status & resource_bit) {
1715                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1716                    lock_status, resource_bit);
1717                 return -EEXIST;
1718         }
1719
1720         /* Try for 1 second every 5ms */
1721         for (cnt = 0; cnt < 200; cnt++) {
1722                 /* Try to acquire the lock */
1723                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724                 lock_status = REG_RD(bp, hw_lock_control_reg);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
1733
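/* Illustrative sketch (not part of the driver): the acquire path above
 * is a bounded try-then-sleep poll rather than a busy-wait.  Minimal
 * stand-alone version; try_hw_lock_sketch() and sleep_ms_sketch() are
 * hypothetical stand-ins for the REG_WR/REG_RD pair and msleep().
 */
#if 0
#include <errno.h>
#include <stdbool.h>

extern bool try_hw_lock_sketch(void);          /* write bit, read it back */
extern void sleep_ms_sketch(unsigned int ms);  /* yields the CPU */

static int acquire_with_timeout_sketch(void)
{
        int cnt;

        /* try for 1 second, every 5 ms (200 attempts), as above */
        for (cnt = 0; cnt < 200; cnt++) {
                if (try_hw_lock_sketch())
                        return 0;
                sleep_ms_sketch(5);
        }
        return -EAGAIN;         /* caller may retry later */
}
#endif
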
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         int func = BP_FUNC(bp);
1739         u32 hw_lock_control_reg;
1740
1741         /* Validating that the resource is within range */
1742         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743                 DP(NETIF_MSG_HW,
1744                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746                 return -EINVAL;
1747         }
1748
1749         if (func <= 5) {
1750                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751         } else {
1752                 hw_lock_control_reg =
1753                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754         }
1755
1756         /* Validating that the resource is currently taken */
1757         lock_status = REG_RD(bp, hw_lock_control_reg);
1758         if (!(lock_status & resource_bit)) {
1759                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1760                    lock_status, resource_bit);
1761                 return -EFAULT;
1762         }
1763
1764         REG_WR(bp, hw_lock_control_reg, resource_bit);
1765         return 0;
1766 }
1767
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1770 {
1771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1772
1773         mutex_lock(&bp->port.phy_mutex);
1774
1775         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778 }
1779
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1781 {
1782         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1787
1788         mutex_unlock(&bp->port.phy_mutex);
1789 }
1790
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1792 {
1793         /* The GPIO should be swapped if swap register is set and active */
1794         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
1796         int gpio_shift = gpio_num +
1797                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798         u32 gpio_mask = (1 << gpio_shift);
1799         u32 gpio_reg;
1800
1801         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803                 return -EINVAL;
1804         }
1805
1806         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807         /* read GPIO and mask all but the float bits */
1808         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1809
1810         switch (mode) {
1811         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set CLR */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821                    gpio_num, gpio_shift);
1822                 /* clear FLOAT and set SET */
1823                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825                 break;
1826
1827         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829                    gpio_num, gpio_shift);
1830                 /* set FLOAT */
1831                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832                 break;
1833
1834         default:
1835                 break;
1836         }
1837
1838         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1840
1841         return 0;
1842 }
1843
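/* Illustrative sketch: bnx2x_set_gpio() above is a read-modify-write on
 * a register that packs per-pin FLOAT/SET/CLR fields at fixed bit
 * positions.  A generic "drive pin low" with hypothetical field offsets
 * (the real positions come from bnx2x_reg.h):
 */
#if 0
#include <stdint.h>

#define SKETCH_FLOAT_POS 24     /* hypothetical field offsets */
#define SKETCH_CLR_POS   16

static uint32_t drive_pin_low_sketch(uint32_t reg, unsigned int pin)
{
        uint32_t mask = 1u << pin;

        reg &= ~(mask << SKETCH_FLOAT_POS);   /* stop floating the pin */
        reg |=  (mask << SKETCH_CLR_POS);     /* latch it low */
        return reg;                           /* caller writes this back */
}
#endif
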
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1845 {
1846         u32 spio_mask = (1 << spio_num);
1847         u32 spio_reg;
1848
1849         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850             (spio_num > MISC_REGISTERS_SPIO_7)) {
1851                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856         /* read SPIO and mask all but the float bits */
1857         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862                 /* clear FLOAT and set CLR */
1863                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865                 break;
1866
1867         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869                 /* clear FLOAT and set SET */
1870                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872                 break;
1873
1874         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876                 /* set FLOAT */
1877                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878                 break;
1879
1880         default:
1881                 break;
1882         }
1883
1884         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1886
1887         return 0;
1888 }
1889
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1891 {
1892         switch (bp->link_vars.ieee_fc) {
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895                                           ADVERTISED_Pause);
1896                 break;
1897         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899                                          ADVERTISED_Pause);
1900                 break;
1901         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903                 break;
1904         default:
1905                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906                                           ADVERTISED_Pause);
1907                 break;
1908         }
1909 }
1910
1911 static void bnx2x_link_report(struct bnx2x *bp)
1912 {
1913         if (bp->link_vars.link_up) {
1914                 if (bp->state == BNX2X_STATE_OPEN)
1915                         netif_carrier_on(bp->dev);
1916                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1917
1918                 printk("%d Mbps ", bp->link_vars.line_speed);
1919
1920                 if (bp->link_vars.duplex == DUPLEX_FULL)
1921                         printk("full duplex");
1922                 else
1923                         printk("half duplex");
1924
1925                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927                                 printk(", receive ");
1928                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929                                         printk("& transmit ");
1930                         } else {
1931                                 printk(", transmit ");
1932                         }
1933                         printk("flow control ON");
1934                 }
1935                 printk("\n");
1936
1937         } else { /* link_down */
1938                 netif_carrier_off(bp->dev);
1939                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1940         }
1941 }
1942
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944 {
1945         if (!BP_NOMCP(bp)) {
1946                 u8 rc;
1947
1948                 /* Initialize link parameters structure variables */
1949                 bp->link_params.mtu = bp->dev->mtu;
1950
1951                 bnx2x_acquire_phy_lock(bp);
1952                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1953                 bnx2x_release_phy_lock(bp);
1954
1955                 if (bp->link_vars.link_up)
1956                         bnx2x_link_report(bp);
1957
1958                 bnx2x_calc_fc_adv(bp);
1959
1960                 return rc;
1961         }
1962         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1963         return -EINVAL;
1964 }
1965
1966 static void bnx2x_link_set(struct bnx2x *bp)
1967 {
1968         if (!BP_NOMCP(bp)) {
1969                 bnx2x_acquire_phy_lock(bp);
1970                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1971                 bnx2x_release_phy_lock(bp);
1972
1973                 bnx2x_calc_fc_adv(bp);
1974         } else
1975                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1976 }
1977
1978 static void bnx2x__link_reset(struct bnx2x *bp)
1979 {
1980         if (!BP_NOMCP(bp)) {
1981                 bnx2x_acquire_phy_lock(bp);
1982                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1983                 bnx2x_release_phy_lock(bp);
1984         } else
1985                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1986 }
1987
1988 static u8 bnx2x_link_test(struct bnx2x *bp)
1989 {
1990         u8 rc;
1991
1992         bnx2x_acquire_phy_lock(bp);
1993         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1994         bnx2x_release_phy_lock(bp);
1995
1996         return rc;
1997 }
1998
1999 /* Calculates the sum of vn_min_rates.
2000    It is needed for further normalization of the min_rates.
2001
2002    Returns:
2003      sum of vn_min_rates
2004        or
2005      0 - if all the min_rates are 0.
2006      In the latter case the fairness algorithm should be deactivated.
2007      If not all min_rates are zero then those that are zero will
2008      be set to 1.
2009  */
2010 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2011 {
2012         int i, port = BP_PORT(bp);
2013         u32 wsum = 0;
2014         int all_zero = 1;
2015
2016         for (i = 0; i < E1HVN_MAX; i++) {
2017                 u32 vn_cfg =
2018                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2019                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2020                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2021                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2022                         /* If min rate is zero - set it to 1 */
2023                         if (!vn_min_rate)
2024                                 vn_min_rate = DEF_MIN_RATE;
2025                         else
2026                                 all_zero = 0;
2027
2028                         wsum += vn_min_rate;
2029                 }
2030         }
2031
2032         /* ... only if all min rates are zero - disable FAIRNESS */
2033         if (all_zero)
2034                 return 0;
2035
2036         return wsum;
2037 }
2038
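/* Worked example with illustrative numbers (not from the source): with
 * three visible functions whose min BW fields are 0, 30 and 50, the
 * fields scale to 0, 3000 and 5000; the zero entry is bumped to
 * DEF_MIN_RATE, so wsum = DEF_MIN_RATE + 8000 and fairness stays
 * enabled.  Only when every entry is zero does the function return 0
 * and fairness is deactivated.
 */
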
2039 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2040                                    int en_fness,
2041                                    u16 port_rate,
2042                                    struct cmng_struct_per_port *m_cmng_port)
2043 {
2044         u32 r_param = port_rate / 8;
2045         int port = BP_PORT(bp);
2046         int i;
2047
2048         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2049
2050         /* Enable minmax only if we are in e1hmf mode */
2051         if (IS_E1HMF(bp)) {
2052                 u32 fair_periodic_timeout_usec;
2053                 u32 t_fair;
2054
2055                 /* Enable rate shaping and fairness */
2056                 m_cmng_port->flags.cmng_vn_enable = 1;
2057                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2058                 m_cmng_port->flags.rate_shaping_enable = 1;
2059
2060                 if (!en_fness)
2061                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2062                            "  fairness will be disabled\n");
2063
2064                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2065                 m_cmng_port->rs_vars.rs_periodic_timeout =
2066                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2067
2068                 /* this is the threshold below which no timer arming will occur;
2069                    the 1.25 coefficient makes the threshold a little bigger
2070                    than the real time, to compensate for timer inaccuracy */
2071                 m_cmng_port->rs_vars.rs_threshold =
2072                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2073
2074                 /* resolution of fairness timer */
2075                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2076                 /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2077                 t_fair = T_FAIR_COEF / port_rate;
2078
2079                 /* this is the threshold below which we won't arm
2080                    the timer anymore */
2081                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2082
2083                 /* we multiply by 1e3/8 to get bytes/msec.
2084                    We don't want the credits to exceed
2085                    T_FAIR*FAIR_MEM (the algorithm's resolution) */
2086                 m_cmng_port->fair_vars.upper_bound =
2087                                                 r_param * t_fair * FAIR_MEM;
2088                 /* since each tick is 4 usec */
2089                 m_cmng_port->fair_vars.fairness_timeout =
2090                                                 fair_periodic_timeout_usec / 4;
2091
2092         } else {
2093                 /* Disable rate shaping and fairness */
2094                 m_cmng_port->flags.cmng_vn_enable = 0;
2095                 m_cmng_port->flags.fairness_enable = 0;
2096                 m_cmng_port->flags.rate_shaping_enable = 0;
2097
2098                 DP(NETIF_MSG_IFUP,
2099                    "Single function mode  minmax will be disabled\n");
2100         }
2101
2102         /* Store it to internal memory */
2103         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2104                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2105                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2106                        ((u32 *)(m_cmng_port))[i]);
2107 }
2108
2109 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2110                                    u32 wsum, u16 port_rate,
2111                                  struct cmng_struct_per_port *m_cmng_port)
2112 {
2113         struct rate_shaping_vars_per_vn m_rs_vn;
2114         struct fairness_vars_per_vn m_fair_vn;
2115         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2116         u16 vn_min_rate, vn_max_rate;
2117         int i;
2118
2119         /* If function is hidden - set min and max to zeroes */
2120         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2121                 vn_min_rate = 0;
2122                 vn_max_rate = 0;
2123
2124         } else {
2125                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2126                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2127                 /* If FAIRNESS is enabled (not all min rates are zero) and
2128                    the current min rate is zero - set it to 1.
2129                    This is a requirement of the algorithm. */
2130                 if ((vn_min_rate == 0) && wsum)
2131                         vn_min_rate = DEF_MIN_RATE;
2132                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2133                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2134         }
2135
2136         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2137            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2138
2139         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2140         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2141
2142         /* global vn counter - maximal Mbps for this vn */
2143         m_rs_vn.vn_counter.rate = vn_max_rate;
2144
2145         /* quota - number of bytes transmitted in this period */
2146         m_rs_vn.vn_counter.quota =
2147                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2148
2149 #ifdef BNX2X_PER_PROT_QOS
2150         /* per protocol counter */
2151         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2152                 /* maximal Mbps for this protocol */
2153                 m_rs_vn.protocol_counters[protocol].rate =
2154                                                 protocol_max_rate[protocol];
2155                 /* the quota in each timer period -
2156                    number of bytes transmitted in this period */
2157                 m_rs_vn.protocol_counters[protocol].quota =
2158                         (u32)(rs_periodic_timeout_usec *
2159                           ((double)m_rs_vn.
2160                                    protocol_counters[protocol].rate/8));
2161         }
2162 #endif
2163
2164         if (wsum) {
2165                 /* credit for each period of the fairness algorithm:
2166                    number of bytes in T_FAIR (the vn's share of the port rate).
2167                    wsum should not be larger than 10000, thus
2168                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2169                 m_fair_vn.vn_credit_delta =
2170                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2171                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2172                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2173                    m_fair_vn.vn_credit_delta);
2174         }
2175
2176 #ifdef BNX2X_PER_PROT_QOS
2177         do {
2178                 u32 protocolWeightSum = 0;
2179
2180                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2181                         protocolWeightSum +=
2182                                         drvInit.protocol_min_rate[protocol];
2183                 /* per protocol counter -
2184                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2185                 if (protocolWeightSum > 0) {
2186                         for (protocol = 0;
2187                              protocol < NUM_OF_PROTOCOLS; protocol++)
2188                                 /* credit for each period of the
2189                                    fairness algorithm - number of bytes in
2190                                    T_FAIR (the protocol share the vn rate) */
2191                                 m_fair_vn.protocol_credit_delta[protocol] =
2192                                         (u32)((vn_min_rate / 8) * t_fair *
2193                                         protocol_min_rate / protocolWeightSum);
2194                 }
2195         } while (0);
2196 #endif
2197
2198         /* Store it to internal memory */
2199         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2200                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2201                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2202                        ((u32 *)(&m_rs_vn))[i]);
2203
2204         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2205                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2206                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2207                        ((u32 *)(&m_fair_vn))[i]);
2208 }
2209
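/* Note on the credit math above (an illustrative reading, not an
 * authoritative spec): each fairness period grants a vn roughly
 * vn_min_rate * T_FAIR_COEF / (8 * wsum) bytes, floored at twice
 * fair_threshold (QM_ARB_BYTES, set in bnx2x_init_port_minmax()) so
 * that a vn with a tiny min rate still receives a usable credit.
 */
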
2210 /* This function is called upon link interrupt */
2211 static void bnx2x_link_attn(struct bnx2x *bp)
2212 {
2213         int vn;
2214
2215         /* Make sure that we are synced with the current statistics */
2216         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2217
2218         bnx2x_acquire_phy_lock(bp);
2219         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2220         bnx2x_release_phy_lock(bp);
2221
2222         if (bp->link_vars.link_up) {
2223
2224                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2225                         struct host_port_stats *pstats;
2226
2227                         pstats = bnx2x_sp(bp, port_stats);
2228                         /* reset old bmac stats */
2229                         memset(&(pstats->mac_stx[0]), 0,
2230                                sizeof(struct mac_stx));
2231                 }
2232                 if ((bp->state == BNX2X_STATE_OPEN) ||
2233                     (bp->state == BNX2X_STATE_DISABLED))
2234                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2235         }
2236
2237         /* indicate link status */
2238         bnx2x_link_report(bp);
2239
2240         if (IS_E1HMF(bp)) {
2241                 int func;
2242
2243                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2244                         if (vn == BP_E1HVN(bp))
2245                                 continue;
2246
2247                         func = ((vn << 1) | BP_PORT(bp));
2248
2249                         /* Set the attention towards other drivers
2250                            on the same port */
2251                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2252                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2253                 }
2254         }
2255
2256         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2257                 struct cmng_struct_per_port m_cmng_port;
2258                 u32 wsum;
2259                 int port = BP_PORT(bp);
2260
2261                 /* Init RATE SHAPING and FAIRNESS contexts */
2262                 wsum = bnx2x_calc_vn_wsum(bp);
2263                 bnx2x_init_port_minmax(bp, (int)wsum,
2264                                         bp->link_vars.line_speed,
2265                                         &m_cmng_port);
2266                 if (IS_E1HMF(bp))
2267                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2268                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2269                                         wsum, bp->link_vars.line_speed,
2270                                                      &m_cmng_port);
2271         }
2272 }
2273
2274 static void bnx2x__link_status_update(struct bnx2x *bp)
2275 {
2276         if (bp->state != BNX2X_STATE_OPEN)
2277                 return;
2278
2279         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2280
2281         if (bp->link_vars.link_up)
2282                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2283         else
2284                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2285
2286         /* indicate link status */
2287         bnx2x_link_report(bp);
2288 }
2289
2290 static void bnx2x_pmf_update(struct bnx2x *bp)
2291 {
2292         int port = BP_PORT(bp);
2293         u32 val;
2294
2295         bp->port.pmf = 1;
2296         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2297
2298         /* enable nig attention */
2299         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2302
2303         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2304 }
2305
2306 /* end of Link */
2307
2308 /* slow path */
2309
2310 /*
2311  * General service functions
2312  */
2313
2314 /* the slow path queue is odd since completions arrive on the fastpath ring */
2315 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316                          u32 data_hi, u32 data_lo, int common)
2317 {
2318         int func = BP_FUNC(bp);
2319
2320         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2322            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2325
2326 #ifdef BNX2X_STOP_ON_ERROR
2327         if (unlikely(bp->panic))
2328                 return -EIO;
2329 #endif
2330
2331         spin_lock_bh(&bp->spq_lock);
2332
2333         if (!bp->spq_left) {
2334                 BNX2X_ERR("BUG! SPQ ring full!\n");
2335                 spin_unlock_bh(&bp->spq_lock);
2336                 bnx2x_panic();
2337                 return -EBUSY;
2338         }
2339
2340         /* CID needs port number to be encoded in it */
2341         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2343                                      HW_CID(bp, cid)));
2344         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2345         if (common)
2346                 bp->spq_prod_bd->hdr.type |=
2347                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2348
2349         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2351
2352         bp->spq_left--;
2353
2354         if (bp->spq_prod_bd == bp->spq_last_bd) {
2355                 bp->spq_prod_bd = bp->spq;
2356                 bp->spq_prod_idx = 0;
2357                 DP(NETIF_MSG_TIMER, "end of spq\n");
2358
2359         } else {
2360                 bp->spq_prod_bd++;
2361                 bp->spq_prod_idx++;
2362         }
2363
2364         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2365                bp->spq_prod_idx);
2366
2367         spin_unlock_bh(&bp->spq_lock);
2368         return 0;
2369 }
2370
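/* Illustrative sketch (not part of the driver): the producer advance in
 * bnx2x_sp_post() is the usual "bump or wrap" walk over a linear BD
 * array.  Stand-alone version over a hypothetical ring descriptor:
 */
#if 0
struct spq_sketch {
        char *first_bd, *last_bd, *prod_bd;   /* ring bounds and producer */
        unsigned int prod_idx;
        unsigned int bd_size;
};

static void spq_advance_sketch(struct spq_sketch *r)
{
        if (r->prod_bd == r->last_bd) {       /* wrap to the first BD */
                r->prod_bd = r->first_bd;
                r->prod_idx = 0;
        } else {
                r->prod_bd += r->bd_size;
                r->prod_idx++;
        }
}
#endif
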
2371 /* acquire split MCP access lock register */
2372 static int bnx2x_acquire_alr(struct bnx2x *bp)
2373 {
2374         u32 i, j, val;
2375         int rc = 0;
2376
2377         might_sleep();
2378         i = 100;
2379         for (j = 0; j < i*10; j++) {
2380                 val = (1UL << 31);
2381                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383                 if (val & (1L << 31))
2384                         break;
2385
2386                 msleep(5);
2387         }
2388         if (!(val & (1L << 31))) {
2389                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2390                 rc = -EBUSY;
2391         }
2392
2393         return rc;
2394 }
2395
2396 /* release split MCP access lock register */
2397 static void bnx2x_release_alr(struct bnx2x *bp)
2398 {
2399         u32 val = 0;
2400
2401         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2402 }
2403
2404 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2405 {
2406         struct host_def_status_block *def_sb = bp->def_status_blk;
2407         u16 rc = 0;
2408
2409         barrier(); /* status block is written to by the chip */
2410         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2411                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2412                 rc |= 1;
2413         }
2414         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2415                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2416                 rc |= 2;
2417         }
2418         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2419                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2420                 rc |= 4;
2421         }
2422         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2423                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2424                 rc |= 8;
2425         }
2426         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2427                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2428                 rc |= 16;
2429         }
2430         return rc;
2431 }
2432
2433 /*
2434  * slow path service functions
2435  */
2436
2437 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2438 {
2439         int port = BP_PORT(bp);
2440         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2441                        COMMAND_REG_ATTN_BITS_SET);
2442         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2443                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2444         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2445                                        NIG_REG_MASK_INTERRUPT_PORT0;
2446         u32 aeu_mask;
2447
2448         if (bp->attn_state & asserted)
2449                 BNX2X_ERR("IGU ERROR\n");
2450
2451         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2452         aeu_mask = REG_RD(bp, aeu_addr);
2453
2454         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2455            aeu_mask, asserted);
2456         aeu_mask &= ~(asserted & 0xff);
2457         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2458
2459         REG_WR(bp, aeu_addr, aeu_mask);
2460         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2461
2462         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2463         bp->attn_state |= asserted;
2464         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2465
2466         if (asserted & ATTN_HARD_WIRED_MASK) {
2467                 if (asserted & ATTN_NIG_FOR_FUNC) {
2468
2469                         /* save nig interrupt mask */
2470                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2471                         REG_WR(bp, nig_int_mask_addr, 0);
2472
2473                         bnx2x_link_attn(bp);
2474
2475                         /* handle unicore attn? */
2476                 }
2477                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2478                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2479
2480                 if (asserted & GPIO_2_FUNC)
2481                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2482
2483                 if (asserted & GPIO_3_FUNC)
2484                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2485
2486                 if (asserted & GPIO_4_FUNC)
2487                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2488
2489                 if (port == 0) {
2490                         if (asserted & ATTN_GENERAL_ATTN_1) {
2491                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2492                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2493                         }
2494                         if (asserted & ATTN_GENERAL_ATTN_2) {
2495                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2496                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2497                         }
2498                         if (asserted & ATTN_GENERAL_ATTN_3) {
2499                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2500                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2501                         }
2502                 } else {
2503                         if (asserted & ATTN_GENERAL_ATTN_4) {
2504                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2505                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2506                         }
2507                         if (asserted & ATTN_GENERAL_ATTN_5) {
2508                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2509                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2510                         }
2511                         if (asserted & ATTN_GENERAL_ATTN_6) {
2512                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2513                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2514                         }
2515                 }
2516
2517         } /* if hardwired */
2518
2519         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2520            asserted, hc_addr);
2521         REG_WR(bp, hc_addr, asserted);
2522
2523         /* now set back the mask */
2524         if (asserted & ATTN_NIG_FOR_FUNC)
2525                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2526 }
2527
2528 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2529 {
2530         int port = BP_PORT(bp);
2531         int reg_offset;
2532         u32 val;
2533
2534         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2535                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2536
2537         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2538
2539                 val = REG_RD(bp, reg_offset);
2540                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2541                 REG_WR(bp, reg_offset, val);
2542
2543                 BNX2X_ERR("SPIO5 hw attention\n");
2544
2545                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2546                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2547                         /* Fan failure attention */
2548
2549                         /* The PHY reset is controlled by GPIO 1 */
2550                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2551                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2552                         /* Low power mode is controlled by GPIO 2 */
2553                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2554                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2555                         /* mark the failure */
2556                         bp->link_params.ext_phy_config &=
2557                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2558                         bp->link_params.ext_phy_config |=
2559                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2560                         SHMEM_WR(bp,
2561                                  dev_info.port_hw_config[port].
2562                                                         external_phy_config,
2563                                  bp->link_params.ext_phy_config);
2564                         /* log the failure */
2565                         printk(KERN_ERR PFX "Fan Failure on Network"
2566                                " Controller %s has caused the driver to"
2567                                " shut down the card to prevent permanent"
2568                                " damage.  Please contact Dell Support for"
2569                                " assistance\n", bp->dev->name);
2570                         break;
2571
2572                 default:
2573                         break;
2574                 }
2575         }
2576
2577         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2578
2579                 val = REG_RD(bp, reg_offset);
2580                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2581                 REG_WR(bp, reg_offset, val);
2582
2583                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2584                           (attn & HW_INTERRUT_ASSERT_SET_0));
2585                 bnx2x_panic();
2586         }
2587 }
2588
2589 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2590 {
2591         u32 val;
2592
2593         if (attn & BNX2X_DOORQ_ASSERT) {
2594
2595                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2596                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2597                 /* DORQ discard attention */
2598                 if (val & 0x2)
2599                         BNX2X_ERR("FATAL error from DORQ\n");
2600         }
2601
2602         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2603
2604                 int port = BP_PORT(bp);
2605                 int reg_offset;
2606
2607                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2608                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2609
2610                 val = REG_RD(bp, reg_offset);
2611                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2612                 REG_WR(bp, reg_offset, val);
2613
2614                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2615                           (attn & HW_INTERRUT_ASSERT_SET_1));
2616                 bnx2x_panic();
2617         }
2618 }
2619
2620 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2621 {
2622         u32 val;
2623
2624         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2625
2626                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2627                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2628                 /* CFC error attention */
2629                 if (val & 0x2)
2630                         BNX2X_ERR("FATAL error from CFC\n");
2631         }
2632
2633         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2634
2635                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2636                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2637                 /* RQ_USDMDP_FIFO_OVERFLOW */
2638                 if (val & 0x18000)
2639                         BNX2X_ERR("FATAL error from PXP\n");
2640         }
2641
2642         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2643
2644                 int port = BP_PORT(bp);
2645                 int reg_offset;
2646
2647                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2648                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2649
2650                 val = REG_RD(bp, reg_offset);
2651                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2652                 REG_WR(bp, reg_offset, val);
2653
2654                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2655                           (attn & HW_INTERRUT_ASSERT_SET_2));
2656                 bnx2x_panic();
2657         }
2658 }
2659
2660 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2661 {
2662         u32 val;
2663
2664         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2665
2666                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2667                         int func = BP_FUNC(bp);
2668
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2670                         bnx2x__link_status_update(bp);
2671                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2672                                                         DRV_STATUS_PMF)
2673                                 bnx2x_pmf_update(bp);
2674
2675                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2676
2677                         BNX2X_ERR("MC assert!\n");
2678                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2679                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2680                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2681                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2682                         bnx2x_panic();
2683
2684                 } else if (attn & BNX2X_MCP_ASSERT) {
2685
2686                         BNX2X_ERR("MCP assert!\n");
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2688                         bnx2x_fw_dump(bp);
2689
2690                 } else
2691                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2692         }
2693
2694         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2695                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2696                 if (attn & BNX2X_GRC_TIMEOUT) {
2697                         val = CHIP_IS_E1H(bp) ?
2698                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2699                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2700                 }
2701                 if (attn & BNX2X_GRC_RSV) {
2702                         val = CHIP_IS_E1H(bp) ?
2703                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2704                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2705                 }
2706                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2707         }
2708 }
2709
2710 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2711 {
2712         struct attn_route attn;
2713         struct attn_route group_mask;
2714         int port = BP_PORT(bp);
2715         int index;
2716         u32 reg_addr;
2717         u32 val;
2718         u32 aeu_mask;
2719
2720         /* need to take HW lock because MCP or other port might also
2721            try to handle this event */
2722         bnx2x_acquire_alr(bp);
2723
2724         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2725         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2726         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2727         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2728         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2729            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2730
2731         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2732                 if (deasserted & (1 << index)) {
2733                         group_mask = bp->attn_group[index];
2734
2735                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2736                            index, group_mask.sig[0], group_mask.sig[1],
2737                            group_mask.sig[2], group_mask.sig[3]);
2738
2739                         bnx2x_attn_int_deasserted3(bp,
2740                                         attn.sig[3] & group_mask.sig[3]);
2741                         bnx2x_attn_int_deasserted1(bp,
2742                                         attn.sig[1] & group_mask.sig[1]);
2743                         bnx2x_attn_int_deasserted2(bp,
2744                                         attn.sig[2] & group_mask.sig[2]);
2745                         bnx2x_attn_int_deasserted0(bp,
2746                                         attn.sig[0] & group_mask.sig[0]);
2747
2748                         if ((attn.sig[0] & group_mask.sig[0] &
2749                                                 HW_PRTY_ASSERT_SET_0) ||
2750                             (attn.sig[1] & group_mask.sig[1] &
2751                                                 HW_PRTY_ASSERT_SET_1) ||
2752                             (attn.sig[2] & group_mask.sig[2] &
2753                                                 HW_PRTY_ASSERT_SET_2))
2754                                BNX2X_ERR("FATAL HW block parity attention\n");
2755                 }
2756         }
2757
2758         bnx2x_release_alr(bp);
2759
2760         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2761
2762         val = ~deasserted;
2763         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2764            val, reg_addr);
2765         REG_WR(bp, reg_addr, val);
2766
2767         if (~bp->attn_state & deasserted)
2768                 BNX2X_ERR("IGU ERROR\n");
2769
2770         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2771                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2772
2773         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774         aeu_mask = REG_RD(bp, reg_addr);
2775
2776         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2777            aeu_mask, deasserted);
2778         aeu_mask |= (deasserted & 0xff);
2779         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2780
2781         REG_WR(bp, reg_addr, aeu_mask);
2782         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2783
2784         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2785         bp->attn_state &= ~deasserted;
2786         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2787 }
2788
2789 static void bnx2x_attn_int(struct bnx2x *bp)
2790 {
2791         /* read local copy of bits */
2792         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2793         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2794         u32 attn_state = bp->attn_state;
2795
2796         /* look for changed bits */
2797         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2798         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
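        /* illustrative example: with attn_bits 0x5, attn_ack 0x1 and
         * attn_state 0x1, asserted is 0x4 (newly raised, not yet acked)
         * and deasserted is 0 (no acked line has dropped)
         */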
2799
2800         DP(NETIF_MSG_HW,
2801            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2802            attn_bits, attn_ack, asserted, deasserted);
2803
2804         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2805                 BNX2X_ERR("BAD attention state\n");
2806
2807         /* handle bits that were raised */
2808         if (asserted)
2809                 bnx2x_attn_int_asserted(bp, asserted);
2810
2811         if (deasserted)
2812                 bnx2x_attn_int_deasserted(bp, deasserted);
2813 }
2814
2815 static void bnx2x_sp_task(struct work_struct *work)
2816 {
2817         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2818         u16 status;
2819
2820
2821         /* Return here if interrupt is disabled */
2822         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2823                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2824                 return;
2825         }
2826
2827         status = bnx2x_update_dsb_idx(bp);
2828 /*      if (status == 0)                                     */
2829 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2830
2831         DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2832
2833         /* HW attentions */
2834         if (status & 0x1)
2835                 bnx2x_attn_int(bp);
2836
2837         /* CStorm events: query_stats, port delete ramrod */
2838         if (status & 0x2)
2839                 bp->stats_pending = 0;
2840
2841         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2842                      IGU_INT_NOP, 1);
2843         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2844                      IGU_INT_NOP, 1);
2845         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2846                      IGU_INT_NOP, 1);
2847         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2848                      IGU_INT_NOP, 1);
2849         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2850                      IGU_INT_ENABLE, 1);
2851
2852 }
2853
2854 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2855 {
2856         struct net_device *dev = dev_instance;
2857         struct bnx2x *bp = netdev_priv(dev);
2858
2859         /* Return here if interrupt is disabled */
2860         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2861                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2862                 return IRQ_HANDLED;
2863         }
2864
2865         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2866
2867 #ifdef BNX2X_STOP_ON_ERROR
2868         if (unlikely(bp->panic))
2869                 return IRQ_HANDLED;
2870 #endif
2871
2872         schedule_work(&bp->sp_task);
2873
2874         return IRQ_HANDLED;
2875 }
2876
2877 /* end of slow path */
2878
2879 /* Statistics */
2880
2881 /****************************************************************************
2882 * Macros
2883 ****************************************************************************/
2884
2885 /* sum[hi:lo] += add[hi:lo] */
2886 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2887         do { \
2888                 s_lo += a_lo; \
2889                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2890         } while (0)
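/* illustrative example: ADD_64(s_hi, 0, s_lo, 1) on s = 0x0:0xffffffff
 * wraps s_lo to 0; (s_lo < a_lo) then supplies the carry, leaving
 * s = 0x1:0x00000000
 */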
2891
2892 /* difference = minuend - subtrahend */
2893 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2894         do { \
2895                 if (m_lo < s_lo) { \
2896                         /* underflow */ \
2897                         d_hi = m_hi - s_hi; \
2898                         if (d_hi > 0) { \
2899                         /* we can borrow 1 from the high word */ \
2900                                 d_hi--; \
2901                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2902                         } else { \
2903                         /* m_hi <= s_hi */ \
2904                                 d_hi = 0; \
2905                                 d_lo = 0; \
2906                         } \
2907                 } else { \
2908                         /* m_lo >= s_lo */ \
2909                         if (m_hi < s_hi) { \
2910                                 d_hi = 0; \
2911                                 d_lo = 0; \
2912                         } else { \
2913                         /* m_hi >= s_hi */ \
2914                                 d_hi = m_hi - s_hi; \
2915                                 d_lo = m_lo - s_lo; \
2916                         } \
2917                 } \
2918         } while (0)
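/* illustrative example: 0x2:0x0 minus 0x1:0x1 takes the underflow
 * branch, borrows 1 from the high word and yields 0x0:0xffffffff;
 * when the subtrahend is the larger value the result is clamped
 * to 0:0
 */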
2919
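/* mac_stx[0] holds the previous raw MAC snapshot, mac_stx[1] the
 * running 64-bit total; each update folds the (new - old) delta into
 * the total and refreshes the snapshot
 */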
2920 #define UPDATE_STAT64(s, t) \
2921         do { \
2922                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2923                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2924                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2925                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2926                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2927                        pstats->mac_stx[1].t##_lo, diff.lo); \
2928         } while (0)
2929
2930 #define UPDATE_STAT64_NIG(s, t) \
2931         do { \
2932                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2933                         diff.lo, new->s##_lo, old->s##_lo); \
2934                 ADD_64(estats->t##_hi, diff.hi, \
2935                        estats->t##_lo, diff.lo); \
2936         } while (0)
2937
2938 /* sum[hi:lo] += add */
2939 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2940         do { \
2941                 s_lo += a; \
2942                 s_hi += (s_lo < a) ? 1 : 0; \
2943         } while (0)
2944
2945 #define UPDATE_EXTEND_STAT(s) \
2946         do { \
2947                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2948                               pstats->mac_stx[1].s##_lo, \
2949                               new->s); \
2950         } while (0)
2951
2952 #define UPDATE_EXTEND_TSTAT(s, t) \
2953         do { \
2954                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2955                 old_tclient->s = le32_to_cpu(tclient->s); \
2956                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2957         } while (0)
2958
2959 #define UPDATE_EXTEND_XSTAT(s, t) \
2960         do { \
2961                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2962                 old_xclient->s = le32_to_cpu(xclient->s); \
2963                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2964         } while (0)
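/* UPDATE_EXTEND_TSTAT/XSTAT extend 32-bit storm counters to 64 bits:
 * the delta since the previous snapshot is computed in 32-bit
 * arithmetic, which stays correct across a single wrap of the HW
 * counter, and is then accumulated with ADD_EXTEND_64
 */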
2965
2966 /*
2967  * General service functions
2968  */
2969
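/* fold a {hi, lo} pair of u32 stats words into one long; on 32-bit
 * hosts a long can only hold the low word, so the high word is
 * dropped there
 */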
2970 static inline long bnx2x_hilo(u32 *hiref)
2971 {
2972         u32 lo = *(hiref + 1);
2973 #if (BITS_PER_LONG == 64)
2974         u32 hi = *hiref;
2975
2976         return HILO_U64(hi, lo);
2977 #else
2978         return lo;
2979 #endif
2980 }
2981
2982 /*
2983  * Init service functions
2984  */
2985
2986 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2987 {
2988         if (!bp->stats_pending) {
2989                 struct eth_query_ramrod_data ramrod_data = {0};
2990                 int rc;
2991
2992                 ramrod_data.drv_counter = bp->stats_counter++;
2993                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2994                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2995
2996                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2997                                    ((u32 *)&ramrod_data)[1],
2998                                    ((u32 *)&ramrod_data)[0], 0);
2999                 if (rc == 0) {
3000                         /* the stats ramrod has its own slot on the spq */
3001                         bp->spq_left++;
3002                         bp->stats_pending = 1;
3003                 }
3004         }
3005 }
3006
3007 static void bnx2x_stats_init(struct bnx2x *bp)
3008 {
3009         int port = BP_PORT(bp);
3010
3011         bp->executer_idx = 0;
3012         bp->stats_counter = 0;
3013
3014         /* port stats */
3015         if (!BP_NOMCP(bp))
3016                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3017         else
3018                 bp->port.port_stx = 0;
3019         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3020
3021         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3022         bp->port.old_nig_stats.brb_discard =
3023                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3024         bp->port.old_nig_stats.brb_truncate =
3025                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3026         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3027                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3028         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3029                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3030
3031         /* function stats */
3032         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3033         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3034         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3035         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3036
3037         bp->stats_state = STATS_STATE_DISABLED;
3038         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3039                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3040 }
3041
3042 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3043 {
3044         struct dmae_command *dmae = &bp->stats_dmae;
3045         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3046
3047         *stats_comp = DMAE_COMP_VAL;
3048
3049         /* loader */
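        /* the loader is itself a DMAE command: it copies a prepared
         * command from the slowpath buffer into DMAE command memory,
         * and its completion write to the next GO register fires that
         * command; each chained command's completion re-arms the
         * loader, so the whole chain runs with no CPU involvement
         */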
3050         if (bp->executer_idx) {
3051                 int loader_idx = PMF_DMAE_C(bp);
3052
3053                 memset(dmae, 0, sizeof(struct dmae_command));
3054
3055                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3056                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3057                                 DMAE_CMD_DST_RESET |
3058 #ifdef __BIG_ENDIAN
3059                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3060 #else
3061                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3062 #endif
3063                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3064                                                DMAE_CMD_PORT_0) |
3065                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3066                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3067                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3068                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3069                                      sizeof(struct dmae_command) *
3070                                      (loader_idx + 1)) >> 2;
3071                 dmae->dst_addr_hi = 0;
3072                 dmae->len = sizeof(struct dmae_command) >> 2;
3073                 if (CHIP_IS_E1(bp))
3074                         dmae->len--;
3075                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3076                 dmae->comp_addr_hi = 0;
3077                 dmae->comp_val = 1;
3078
3079                 *stats_comp = 0;
3080                 bnx2x_post_dmae(bp, dmae, loader_idx);
3081
3082         } else if (bp->func_stx) {
3083                 *stats_comp = 0;
3084                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3085         }
3086 }
3087
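/* poll the DMAE completion word for up to ~10ms (10 x 1ms sleeps);
 * a timeout is logged but treated as non-fatal
 */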
3088 static int bnx2x_stats_comp(struct bnx2x *bp)
3089 {
3090         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091         int cnt = 10;
3092
3093         might_sleep();
3094         while (*stats_comp != DMAE_COMP_VAL) {
3095                 if (!cnt) {
3096                         BNX2X_ERR("timeout waiting for stats to finish\n");
3097                         break;
3098                 }
3099                 cnt--;
3100                 msleep(1);
3101         }
3102         return 1;
3103 }
3104
3105 /*
3106  * Statistics service functions
3107  */
3108
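/* re-read the accumulated port stats from the MCP scratchpad after a
 * PMF change; the transfer is split in two because a single DMAE read
 * is capped at DMAE_LEN32_RD_MAX dwords
 */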
3109 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3110 {
3111         struct dmae_command *dmae;
3112         u32 opcode;
3113         int loader_idx = PMF_DMAE_C(bp);
3114         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3115
3116         /* sanity */
3117         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3118                 BNX2X_ERR("BUG!\n");
3119                 return;
3120         }
3121
3122         bp->executer_idx = 0;
3123
3124         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3125                   DMAE_CMD_C_ENABLE |
3126                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3127 #ifdef __BIG_ENDIAN
3128                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3129 #else
3130                   DMAE_CMD_ENDIANITY_DW_SWAP |
3131 #endif
3132                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3133                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3134
3135         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3136         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3137         dmae->src_addr_lo = bp->port.port_stx >> 2;
3138         dmae->src_addr_hi = 0;
3139         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3140         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3141         dmae->len = DMAE_LEN32_RD_MAX;
3142         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3143         dmae->comp_addr_hi = 0;
3144         dmae->comp_val = 1;
3145
3146         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3147         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3148         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3149         dmae->src_addr_hi = 0;
3150         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3151                                    DMAE_LEN32_RD_MAX * 4);
3152         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3153                                    DMAE_LEN32_RD_MAX * 4);
3154         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3155         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3156         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3157         dmae->comp_val = DMAE_COMP_VAL;
3158
3159         *stats_comp = 0;
3160         bnx2x_hw_stats_post(bp);
3161         bnx2x_stats_comp(bp);
3162 }
3163
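/* build the PMF DMAE chain: push the host port/function stats out to
 * the MCP scratchpad and pull the active MAC (BMAC or EMAC) and NIG
 * counters into host memory; bnx2x_hw_stats_post() executes the chain
 */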
3164 static void bnx2x_port_stats_init(struct bnx2x *bp)
3165 {
3166         struct dmae_command *dmae;
3167         int port = BP_PORT(bp);
3168         int vn = BP_E1HVN(bp);
3169         u32 opcode;
3170         int loader_idx = PMF_DMAE_C(bp);
3171         u32 mac_addr;
3172         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3173
3174         /* sanity */
3175         if (!bp->link_vars.link_up || !bp->port.pmf) {
3176                 BNX2X_ERR("BUG!\n");
3177                 return;
3178         }
3179
3180         bp->executer_idx = 0;
3181
3182         /* MCP */
3183         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3184                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3185                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3186 #ifdef __BIG_ENDIAN
3187                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3188 #else
3189                   DMAE_CMD_ENDIANITY_DW_SWAP |
3190 #endif
3191                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3192                   (vn << DMAE_CMD_E1HVN_SHIFT));
3193
3194         if (bp->port.port_stx) {
3195
3196                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3197                 dmae->opcode = opcode;
3198                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3199                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3200                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3201                 dmae->dst_addr_hi = 0;
3202                 dmae->len = sizeof(struct host_port_stats) >> 2;
3203                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3204                 dmae->comp_addr_hi = 0;
3205                 dmae->comp_val = 1;
3206         }
3207
3208         if (bp->func_stx) {
3209
3210                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3211                 dmae->opcode = opcode;
3212                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3213                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3214                 dmae->dst_addr_lo = bp->func_stx >> 2;
3215                 dmae->dst_addr_hi = 0;
3216                 dmae->len = sizeof(struct host_func_stats) >> 2;
3217                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3218                 dmae->comp_addr_hi = 0;
3219                 dmae->comp_val = 1;
3220         }
3221
3222         /* MAC */
3223         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3224                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3225                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3226 #ifdef __BIG_ENDIAN
3227                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3228 #else
3229                   DMAE_CMD_ENDIANITY_DW_SWAP |
3230 #endif
3231                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3232                   (vn << DMAE_CMD_E1HVN_SHIFT));
3233
3234         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3235
3236                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3237                                    NIG_REG_INGRESS_BMAC0_MEM);
3238
3239                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3240                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3241                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242                 dmae->opcode = opcode;
3243                 dmae->src_addr_lo = (mac_addr +
3244                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3245                 dmae->src_addr_hi = 0;
3246                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3247                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3248                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3249                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3250                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3251                 dmae->comp_addr_hi = 0;
3252                 dmae->comp_val = 1;
3253
3254                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3255                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3256                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257                 dmae->opcode = opcode;
3258                 dmae->src_addr_lo = (mac_addr +
3259                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3260                 dmae->src_addr_hi = 0;
3261                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3262                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3263                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3264                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3265                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3266                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3268                 dmae->comp_addr_hi = 0;
3269                 dmae->comp_val = 1;
3270
3271         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3272
3273                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3274
3275                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3276                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3277                 dmae->opcode = opcode;
3278                 dmae->src_addr_lo = (mac_addr +
3279                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3280                 dmae->src_addr_hi = 0;
3281                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3282                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3283                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3284                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285                 dmae->comp_addr_hi = 0;
3286                 dmae->comp_val = 1;
3287
3288                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3289                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3290                 dmae->opcode = opcode;
3291                 dmae->src_addr_lo = (mac_addr +
3292                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3293                 dmae->src_addr_hi = 0;
3294                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3295                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3296                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3297                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3298                 dmae->len = 1;
3299                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3300                 dmae->comp_addr_hi = 0;
3301                 dmae->comp_val = 1;
3302
3303                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3304                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305                 dmae->opcode = opcode;
3306                 dmae->src_addr_lo = (mac_addr +
3307                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3308                 dmae->src_addr_hi = 0;
3309                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3310                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3311                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3312                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3313                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3314                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315                 dmae->comp_addr_hi = 0;
3316                 dmae->comp_val = 1;
3317         }
3318
3319         /* NIG */
3320         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321         dmae->opcode = opcode;
3322         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3323                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3324         dmae->src_addr_hi = 0;
3325         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3326         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3327         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3328         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329         dmae->comp_addr_hi = 0;
3330         dmae->comp_val = 1;
3331
3332         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3333         dmae->opcode = opcode;
3334         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3335                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3336         dmae->src_addr_hi = 0;
3337         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3338                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3339         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3340                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3341         dmae->len = (2*sizeof(u32)) >> 2;
3342         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3343         dmae->comp_addr_hi = 0;
3344         dmae->comp_val = 1;
3345
3346         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3348                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3349                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3350 #ifdef __BIG_ENDIAN
3351                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3352 #else
3353                         DMAE_CMD_ENDIANITY_DW_SWAP |
3354 #endif
3355                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3356                         (vn << DMAE_CMD_E1HVN_SHIFT));
3357         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3358                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3359         dmae->src_addr_hi = 0;
3360         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3361                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3362         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3363                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3364         dmae->len = (2*sizeof(u32)) >> 2;
3365         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3366         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3367         dmae->comp_val = DMAE_COMP_VAL;
3368
3369         *stats_comp = 0;
3370 }
3371
3372 static void bnx2x_func_stats_init(struct bnx2x *bp)
3373 {
3374         struct dmae_command *dmae = &bp->stats_dmae;
3375         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3376
3377         /* sanity */
3378         if (!bp->func_stx) {
3379                 BNX2X_ERR("BUG!\n");
3380                 return;
3381         }
3382
3383         bp->executer_idx = 0;
3384         memset(dmae, 0, sizeof(struct dmae_command));
3385
3386         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3387                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3388                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3389 #ifdef __BIG_ENDIAN
3390                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3391 #else
3392                         DMAE_CMD_ENDIANITY_DW_SWAP |
3393 #endif
3394                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3395                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3396         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3397         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3398         dmae->dst_addr_lo = bp->func_stx >> 2;
3399         dmae->dst_addr_hi = 0;
3400         dmae->len = sizeof(struct host_func_stats) >> 2;
3401         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3402         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3403         dmae->comp_val = DMAE_COMP_VAL;
3404
3405         *stats_comp = 0;
3406 }
3407
3408 static void bnx2x_stats_start(struct bnx2x *bp)
3409 {
3410         if (bp->port.pmf)
3411                 bnx2x_port_stats_init(bp);
3412
3413         else if (bp->func_stx)
3414                 bnx2x_func_stats_init(bp);
3415
3416         bnx2x_hw_stats_post(bp);
3417         bnx2x_storm_stats_post(bp);
3418 }
3419
3420 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3421 {
3422         bnx2x_stats_comp(bp);
3423         bnx2x_stats_pmf_update(bp);
3424         bnx2x_stats_start(bp);
3425 }
3426
3427 static void bnx2x_stats_restart(struct bnx2x *bp)
3428 {
3429         bnx2x_stats_comp(bp);
3430         bnx2x_stats_start(bp);
3431 }
3432
3433 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3434 {
3435         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3436         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3437         struct regpair diff;
3438
3439         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3440         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3441         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3442         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3443         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3444         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3445         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3446         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3447         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3448         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3449         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3450         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3451         UPDATE_STAT64(tx_stat_gt127,
3452                                 tx_stat_etherstatspkts65octetsto127octets);
3453         UPDATE_STAT64(tx_stat_gt255,
3454                                 tx_stat_etherstatspkts128octetsto255octets);
3455         UPDATE_STAT64(tx_stat_gt511,
3456                                 tx_stat_etherstatspkts256octetsto511octets);
3457         UPDATE_STAT64(tx_stat_gt1023,
3458                                 tx_stat_etherstatspkts512octetsto1023octets);
3459         UPDATE_STAT64(tx_stat_gt1518,
3460                                 tx_stat_etherstatspkts1024octetsto1522octets);
3461         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3462         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3463         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3464         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3465         UPDATE_STAT64(tx_stat_gterr,
3466                                 tx_stat_dot3statsinternalmactransmiterrors);
3467         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3468 }
3469
3470 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3471 {
3472         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3473         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3474
3475         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3476         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3477         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3478         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3479         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3480         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3481         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3482         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3483         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3484         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3485         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3486         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3487         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3488         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3489         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3490         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3491         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3492         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3493         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3494         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3495         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3496         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3497         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3498         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3500         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3501         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3502         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3503         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3504         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3505         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3506 }
3507
3508 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3509 {
3510         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3511         struct nig_stats *old = &(bp->port.old_nig_stats);
3512         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3513         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3514         struct regpair diff;
3515
3516         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3517                 bnx2x_bmac_stats_update(bp);
3518
3519         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3520                 bnx2x_emac_stats_update(bp);
3521
3522         else { /* unreached */
3523                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3524                 return -1;
3525         }
3526
3527         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3528                       new->brb_discard - old->brb_discard);
3529         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3530                       new->brb_truncate - old->brb_truncate);
3531
3532         UPDATE_STAT64_NIG(egress_mac_pkt0,
3533                                         etherstatspkts1024octetsto1522octets);
3534         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3535
3536         memcpy(old, new, sizeof(struct nig_stats));
3537
3538         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3539                sizeof(struct mac_stx));
3540         estats->brb_drop_hi = pstats->brb_drop_hi;
3541         estats->brb_drop_lo = pstats->brb_drop_lo;
3542
3543         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3544
3545         return 0;
3546 }
3547
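/* fold the per-client TSTORM/XSTORM blocks into the host function and
 * ethernet stats; each storm stamps its block with the driver's
 * stats_counter, so a stale stamp means the last query ramrod has not
 * been processed yet and the whole update is skipped
 */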
3548 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3549 {
3550         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3551         int cl_id = BP_CL_ID(bp);
3552         struct tstorm_per_port_stats *tport =
3553                                 &stats->tstorm_common.port_statistics;
3554         struct tstorm_per_client_stats *tclient =
3555                         &stats->tstorm_common.client_statistics[cl_id];
3556         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3557         struct xstorm_per_client_stats *xclient =
3558                         &stats->xstorm_common.client_statistics[cl_id];
3559         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3560         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3561         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3562         u32 diff;
3563
3564         /* are storm stats valid? */
3565         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3566                                                         bp->stats_counter) {
3567                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3568                    "  tstorm counter (%d) != stats_counter (%d)\n",
3569                    tclient->stats_counter, bp->stats_counter);
3570                 return -1;
3571         }
3572         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3573                                                         bp->stats_counter) {
3574                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3575                    "  xstorm counter (%d) != stats_counter (%d)\n",
3576                    xclient->stats_counter, bp->stats_counter);
3577                 return -2;
3578         }
3579
3580         fstats->total_bytes_received_hi =
3581         fstats->valid_bytes_received_hi =
3582                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3583         fstats->total_bytes_received_lo =
3584         fstats->valid_bytes_received_lo =
3585                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3586
3587         estats->error_bytes_received_hi =
3588                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3589         estats->error_bytes_received_lo =
3590                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3591         ADD_64(estats->error_bytes_received_hi,
3592                estats->rx_stat_ifhcinbadoctets_hi,
3593                estats->error_bytes_received_lo,
3594                estats->rx_stat_ifhcinbadoctets_lo);
3595
3596         ADD_64(fstats->total_bytes_received_hi,
3597                estats->error_bytes_received_hi,
3598                fstats->total_bytes_received_lo,
3599                estats->error_bytes_received_lo);
3600
3601         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3602         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3603                                 total_multicast_packets_received);
3604         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3605                                 total_broadcast_packets_received);
3606
3607         fstats->total_bytes_transmitted_hi =
3608                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3609         fstats->total_bytes_transmitted_lo =
3610                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3611
3612         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3613                                 total_unicast_packets_transmitted);
3614         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3615                                 total_multicast_packets_transmitted);
3616         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3617                                 total_broadcast_packets_transmitted);
3618
3619         memcpy(estats, &(fstats->total_bytes_received_hi),
3620                sizeof(struct host_func_stats) - 2*sizeof(u32));
3621
3622         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3623         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3624         estats->brb_truncate_discard =
3625                                 le32_to_cpu(tport->brb_truncate_discard);
3626         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3627
3628         old_tclient->rcv_unicast_bytes.hi =
3629                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3630         old_tclient->rcv_unicast_bytes.lo =
3631                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3632         old_tclient->rcv_broadcast_bytes.hi =
3633                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3634         old_tclient->rcv_broadcast_bytes.lo =
3635                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3636         old_tclient->rcv_multicast_bytes.hi =
3637                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3638         old_tclient->rcv_multicast_bytes.lo =
3639                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3640         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3641
3642         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3643         old_tclient->packets_too_big_discard =
3644                                 le32_to_cpu(tclient->packets_too_big_discard);
3645         estats->no_buff_discard =
3646         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3647         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3648
3649         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3650         old_xclient->unicast_bytes_sent.hi =
3651                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3652         old_xclient->unicast_bytes_sent.lo =
3653                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3654         old_xclient->multicast_bytes_sent.hi =
3655                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3656         old_xclient->multicast_bytes_sent.lo =
3657                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3658         old_xclient->broadcast_bytes_sent.hi =
3659                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3660         old_xclient->broadcast_bytes_sent.lo =
3661                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3662
3663         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3664
3665         return 0;
3666 }
3667
3668 static void bnx2x_net_stats_update(struct bnx2x *bp)
3669 {
3670         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3671         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3672         struct net_device_stats *nstats = &bp->dev->stats;
3673
3674         nstats->rx_packets =
3675                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3676                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3677                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3678
3679         nstats->tx_packets =
3680                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3681                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3682                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3683
3684         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3685
3686         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3687
3688         nstats->rx_dropped = old_tclient->checksum_discard +
3689                              estats->mac_discard;
3690         nstats->tx_dropped = 0;
3691
3692         nstats->multicast =
3693                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3694
3695         nstats->collisions =
3696                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3697                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3698                         estats->tx_stat_dot3statslatecollisions_lo +
3699                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3700
3701         estats->jabber_packets_received =
3702                                 old_tclient->packets_too_big_discard +
3703                                 estats->rx_stat_dot3statsframestoolong_lo;
3704
3705         nstats->rx_length_errors =
3706                                 estats->rx_stat_etherstatsundersizepkts_lo +
3707                                 estats->jabber_packets_received;
3708         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3709         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3710         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3711         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3712         nstats->rx_missed_errors = estats->xxoverflow_discard;
3713
3714         nstats->rx_errors = nstats->rx_length_errors +
3715                             nstats->rx_over_errors +
3716                             nstats->rx_crc_errors +
3717                             nstats->rx_frame_errors +
3718                             nstats->rx_fifo_errors +
3719                             nstats->rx_missed_errors;
3720
3721         nstats->tx_aborted_errors =
3722                         estats->tx_stat_dot3statslatecollisions_lo +
3723                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3724         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3725         nstats->tx_fifo_errors = 0;
3726         nstats->tx_heartbeat_errors = 0;
3727         nstats->tx_window_errors = 0;
3728
3729         nstats->tx_errors = nstats->tx_aborted_errors +
3730                             nstats->tx_carrier_errors;
3731 }
3732
3733 static void bnx2x_stats_update(struct bnx2x *bp)
3734 {
3735         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3736         int update = 0;
3737
3738         if (*stats_comp != DMAE_COMP_VAL)
3739                 return;
3740
3741         if (bp->port.pmf)
3742                 update = (bnx2x_hw_stats_update(bp) == 0);
3743
3744         update |= (bnx2x_storm_stats_update(bp) == 0);
3745
3746         if (update)
3747                 bnx2x_net_stats_update(bp);
3748
3749         else {
3750                 if (bp->stats_pending) {
3751                         bp->stats_pending++;
3752                         if (bp->stats_pending == 3) {
3753                                 BNX2X_ERR("stats were not updated 3 times in a row\n");
3754                                 bnx2x_panic();
3755                                 return;
3756                         }
3757                 }
3758         }
3759
3760         if (bp->msglevel & NETIF_MSG_TIMER) {
3761                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3762                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3763                 struct net_device_stats *nstats = &bp->dev->stats;
3764                 int i;
3765
3766                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3767                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3768                                   "  tx pkt (%lx)\n",
3769                        bnx2x_tx_avail(bp->fp),
3770                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3771                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3772                                   "  rx pkt (%lx)\n",
3773                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3774                              bp->fp->rx_comp_cons),
3775                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3776                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3777                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3778                        estats->driver_xoff, estats->brb_drop_lo);
3779                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3780                         "packets_too_big_discard %u  no_buff_discard %u  "
3781                         "mac_discard %u  mac_filter_discard %u  "
3782                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3783                         "ttl0_discard %u\n",
3784                        old_tclient->checksum_discard,
3785                        old_tclient->packets_too_big_discard,
3786                        old_tclient->no_buff_discard, estats->mac_discard,
3787                        estats->mac_filter_discard, estats->xxoverflow_discard,
3788                        estats->brb_truncate_discard,
3789                        old_tclient->ttl0_discard);
3790
3791                 for_each_queue(bp, i) {
3792                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3793                                bnx2x_fp(bp, i, tx_pkt),
3794                                bnx2x_fp(bp, i, rx_pkt),
3795                                bnx2x_fp(bp, i, rx_calls));
3796                 }
3797         }
3798
3799         bnx2x_hw_stats_post(bp);
3800         bnx2x_storm_stats_post(bp);
3801 }
3802
3803 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3804 {
3805         struct dmae_command *dmae;
3806         u32 opcode;
3807         int loader_idx = PMF_DMAE_C(bp);
3808         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3809
3810         bp->executer_idx = 0;
3811
3812         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3813                   DMAE_CMD_C_ENABLE |
3814                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3815 #ifdef __BIG_ENDIAN
3816                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3817 #else
3818                   DMAE_CMD_ENDIANITY_DW_SWAP |
3819 #endif
3820                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3821                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3822
3823         if (bp->port.port_stx) {
3824
3825                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3826                 if (bp->func_stx)
3827                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3828                 else
3829                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3830                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3831                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3832                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3833                 dmae->dst_addr_hi = 0;
3834                 dmae->len = sizeof(struct host_port_stats) >> 2;
3835                 if (bp->func_stx) {
3836                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3837                         dmae->comp_addr_hi = 0;
3838                         dmae->comp_val = 1;
3839                 } else {
3840                         dmae->comp_addr_lo =
3841                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3842                         dmae->comp_addr_hi =
3843                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3844                         dmae->comp_val = DMAE_COMP_VAL;
3845
3846                         *stats_comp = 0;
3847                 }
3848         }
3849
3850         if (bp->func_stx) {
3851
3852                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3853                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3854                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3855                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3856                 dmae->dst_addr_lo = bp->func_stx >> 2;
3857                 dmae->dst_addr_hi = 0;
3858                 dmae->len = sizeof(struct host_func_stats) >> 2;
3859                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3860                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3861                 dmae->comp_val = DMAE_COMP_VAL;
3862
3863                 *stats_comp = 0;
3864         }
3865 }
3866
3867 static void bnx2x_stats_stop(struct bnx2x *bp)
3868 {
3869         int update = 0;
3870
3871         bnx2x_stats_comp(bp);
3872
3873         if (bp->port.pmf)
3874                 update = (bnx2x_hw_stats_update(bp) == 0);
3875
3876         update |= (bnx2x_storm_stats_update(bp) == 0);
3877
3878         if (update) {
3879                 bnx2x_net_stats_update(bp);
3880
3881                 if (bp->port.pmf)
3882                         bnx2x_port_stats_stop(bp);
3883
3884                 bnx2x_hw_stats_post(bp);
3885                 bnx2x_stats_comp(bp);
3886         }
3887 }
3888
3889 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3890 {
3891 }
3892
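/* statistics state machine, indexed by [current state][event]: each
 * cell names the handler to run and the state to enter afterwards
 */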
3893 static const struct {
3894         void (*action)(struct bnx2x *bp);
3895         enum bnx2x_stats_state next_state;
3896 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3897 /* state        event   */
3898 {
3899 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3900 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3901 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3902 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3903 },
3904 {
3905 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3906 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3907 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3908 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3909 }
3910 };
3911
3912 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3913 {
3914         enum bnx2x_stats_state state = bp->stats_state;
3915
3916         bnx2x_stats_stm[state][event].action(bp);
3917         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3918
3919         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3920                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3921                    state, event, bp->stats_state);
3922 }
3923
3924 static void bnx2x_timer(unsigned long data)
3925 {
3926         struct bnx2x *bp = (struct bnx2x *) data;
3927
3928         if (!netif_running(bp->dev))
3929                 return;
3930
3931         if (atomic_read(&bp->intr_sem) != 0)
3932                 goto timer_restart;
3933
3934         if (poll) {
3935                 struct bnx2x_fastpath *fp = &bp->fp[0];
3936                 int rc;
3937
3938                 bnx2x_tx_int(fp, 1000);
3939                 rc = bnx2x_rx_int(fp, 1000);
3940         }
3941
3942         if (!BP_NOMCP(bp)) {
3943                 int func = BP_FUNC(bp);
3944                 u32 drv_pulse;
3945                 u32 mcp_pulse;
3946
3947                 ++bp->fw_drv_pulse_wr_seq;
3948                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3949                 /* TBD - add SYSTEM_TIME */
3950                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3951                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3952
3953                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3954                              MCP_PULSE_SEQ_MASK);
3955                 /* The delta between driver pulse and mcp response
3956                  * should be 1 (before mcp response) or 0 (after mcp response)
3957                  */
3958                 if ((drv_pulse != mcp_pulse) &&
3959                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3960                         /* someone lost a heartbeat... */
3961                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3962                                   drv_pulse, mcp_pulse);
3963                 }
3964         }
3965
3966         if ((bp->state == BNX2X_STATE_OPEN) ||
3967             (bp->state == BNX2X_STATE_DISABLED))
3968                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3969
3970 timer_restart:
3971         mod_timer(&bp->timer, jiffies + bp->current_interval);
3972 }
3973
3974 /* end of Statistics */
3975
3976 /* nic init */
3977
3978 /*
3979  * nic init service functions
3980  */
3981
3982 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3983 {
3984         int port = BP_PORT(bp);
3985
3986         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3987                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3988                         sizeof(struct ustorm_def_status_block)/4);
3989         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3990                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3991                         sizeof(struct cstorm_def_status_block)/4);
3992 }
3993
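/* publish a host status block to the chip: point its USTORM and
 * CSTORM sections at the DMA address, tag them with the owning
 * function and start with host coalescing disabled on every index
 */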
3994 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
3995                           dma_addr_t mapping, int sb_id)
3996 {
3997         int port = BP_PORT(bp);
3998         int func = BP_FUNC(bp);
3999         int index;
4000         u64 section;
4001
4002         /* USTORM */
4003         section = ((u64)mapping) + offsetof(struct host_status_block,
4004                                             u_status_block);
4005         sb->u_status_block.status_block_id = sb_id;
4006
4007         REG_WR(bp, BAR_USTRORM_INTMEM +
4008                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4009         REG_WR(bp, BAR_USTRORM_INTMEM +
4010                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4011                U64_HI(section));
4012         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4013                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4014
4015         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4016                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4017                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4018
4019         /* CSTORM */
4020         section = ((u64)mapping) + offsetof(struct host_status_block,
4021                                             c_status_block);
4022         sb->c_status_block.status_block_id = sb_id;
4023
4024         REG_WR(bp, BAR_CSTRORM_INTMEM +
4025                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4026         REG_WR(bp, BAR_CSTRORM_INTMEM +
4027                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4028                U64_HI(section));
4029         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4030                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4031
4032         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4033                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4034                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4035
4036         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4037 }
4038
4039 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4040 {
4041         int func = BP_FUNC(bp);
4042
4043         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4044                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4045                         sizeof(struct ustorm_def_status_block)/4);
4046         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4047                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4048                         sizeof(struct cstorm_def_status_block)/4);
4049         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4050                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4051                         sizeof(struct xstorm_def_status_block)/4);
4052         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4053                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054                         sizeof(struct tstorm_def_status_block)/4);
4055 }
4056
4057 static void bnx2x_init_def_sb(struct bnx2x *bp,
4058                               struct host_def_status_block *def_sb,
4059                               dma_addr_t mapping, int sb_id)
4060 {
4061         int port = BP_PORT(bp);
4062         int func = BP_FUNC(bp);
4063         int index, val, reg_offset;
4064         u64 section;
4065
4066         /* ATTN */
4067         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4068                                             atten_status_block);
4069         def_sb->atten_status_block.status_block_id = sb_id;
4070
4071         bp->attn_state = 0;
4072
4073         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4074                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4075
4076         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4077                 bp->attn_group[index].sig[0] = REG_RD(bp,
4078                                                      reg_offset + 0x10*index);
4079                 bp->attn_group[index].sig[1] = REG_RD(bp,
4080                                                reg_offset + 0x4 + 0x10*index);
4081                 bp->attn_group[index].sig[2] = REG_RD(bp,
4082                                                reg_offset + 0x8 + 0x10*index);
4083                 bp->attn_group[index].sig[3] = REG_RD(bp,
4084                                                reg_offset + 0xc + 0x10*index);
4085         }
4086
4087         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4088                              HC_REG_ATTN_MSG0_ADDR_L);
4089
4090         REG_WR(bp, reg_offset, U64_LO(section));
4091         REG_WR(bp, reg_offset + 4, U64_HI(section));
4092
4093         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4094
4095         val = REG_RD(bp, reg_offset);
4096         val |= sb_id;
4097         REG_WR(bp, reg_offset, val);
4098
4099         /* USTORM */
4100         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4101                                             u_def_status_block);
4102         def_sb->u_def_status_block.status_block_id = sb_id;
4103
4104         REG_WR(bp, BAR_USTRORM_INTMEM +
4105                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4106         REG_WR(bp, BAR_USTRORM_INTMEM +
4107                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4108                U64_HI(section));
4109         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4110                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4111
4112         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4113                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4114                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4115
4116         /* CSTORM */
4117         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4118                                             c_def_status_block);
4119         def_sb->c_def_status_block.status_block_id = sb_id;
4120
4121         REG_WR(bp, BAR_CSTRORM_INTMEM +
4122                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4123         REG_WR(bp, BAR_CSTRORM_INTMEM +
4124                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4125                U64_HI(section));
4126         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4127                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4128
4129         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4130                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4131                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4132
4133         /* TSTORM */
4134         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135                                             t_def_status_block);
4136         def_sb->t_def_status_block.status_block_id = sb_id;
4137
4138         REG_WR(bp, BAR_TSTRORM_INTMEM +
4139                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140         REG_WR(bp, BAR_TSTRORM_INTMEM +
4141                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142                U64_HI(section));
4143         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4144                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4145
4146         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4148                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* XSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             x_def_status_block);
4153         def_sb->x_def_status_block.status_block_id = sb_id;
4154
4155         REG_WR(bp, BAR_XSTRORM_INTMEM +
4156                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157         REG_WR(bp, BAR_XSTRORM_INTMEM +
4158                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159                U64_HI(section));
4160         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4161                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4162
4163         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4164                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4165                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4166
4167         bp->stats_pending = 0;
4168         bp->set_mac_pending = 0;
4169
4170         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4171 }
4172
4173 static void bnx2x_update_coalesce(struct bnx2x *bp)
4174 {
4175         int port = BP_PORT(bp);
4176         int i;
4177
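        /* The timeout fields are written as rx/tx_ticks/12, presumably
         * because the HC timeout counts in 12-usec units; a timeout of 0
         * disables coalescing for that index via the per-index disable
         * flag written below.
         */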
4178         for_each_queue(bp, i) {
4179                 int sb_id = bp->fp[i].sb_id;
4180
4181                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4182                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4183                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4184                                                     U_SB_ETH_RX_CQ_INDEX),
4185                         bp->rx_ticks/12);
4186                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4187                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4188                                                      U_SB_ETH_RX_CQ_INDEX),
4189                          bp->rx_ticks ? 0 : 1);
4190                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4191                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4192                                                      U_SB_ETH_RX_BD_INDEX),
4193                          bp->rx_ticks ? 0 : 1);
4194
4195                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4196                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4197                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4198                                                     C_SB_ETH_TX_CQ_INDEX),
4199                         bp->tx_ticks/12);
4200                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4201                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4202                                                      C_SB_ETH_TX_CQ_INDEX),
4203                          bp->tx_ticks ? 0 : 1);
4204         }
4205 }
4206
4207 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4208                                        struct bnx2x_fastpath *fp, int last)
4209 {
4210         int i;
4211
4212         for (i = 0; i < last; i++) {
4213                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4214                 struct sk_buff *skb = rx_buf->skb;
4215
4216                 if (skb == NULL) {
4217                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4218                         continue;
4219                 }
4220
4221                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4222                         pci_unmap_single(bp->pdev,
4223                                          pci_unmap_addr(rx_buf, mapping),
4224                                          bp->rx_buf_use_size,
4225                                          PCI_DMA_FROMDEVICE);
4226
4227                 dev_kfree_skb(skb);
4228                 rx_buf->skb = NULL;
4229         }
4230 }
4231
4232 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4233 {
4234         int func = BP_FUNC(bp);
4235         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4236                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4237         u16 ring_prod, cqe_ring_prod;
4238         int i, j;
4239
4240         bp->rx_buf_use_size = bp->dev->mtu;
4241         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4242         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4243
4244         if (bp->flags & TPA_ENABLE_FLAG) {
4245                 DP(NETIF_MSG_IFUP,
4246                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4247                    bp->rx_buf_use_size, bp->rx_buf_size,
4248                    bp->dev->mtu + ETH_OVREHEAD);
4249
4250                 for_each_queue(bp, j) {
4251                         struct bnx2x_fastpath *fp = &bp->fp[j];
4252
4253                         for (i = 0; i < max_agg_queues; i++) {
4254                                 fp->tpa_pool[i].skb =
4255                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4256                                 if (!fp->tpa_pool[i].skb) {
4257                                         BNX2X_ERR("Failed to allocate TPA "
4258                                                   "skb pool for queue[%d] - "
4259                                                   "disabling TPA on this "
4260                                                   "queue!\n", j);
4261                                         bnx2x_free_tpa_pool(bp, fp, i);
4262                                         fp->disable_tpa = 1;
4263                                         break;
4264                                 }
4265                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4266                                                    mapping, 0);
4267
4268                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4269                         }
4270                 }
4271         }
4272
4273         for_each_queue(bp, j) {
4274                 struct bnx2x_fastpath *fp = &bp->fp[j];
4275
4276                 fp->rx_bd_cons = 0;
4277                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4278                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4279
4280                 /* "next page" elements initialization */
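                /* Each ring page ends with a "next page" pointer: the BD
                 * and SGE rings reserve their last two 8-byte entries for
                 * the 64-bit address (hence the "* i - 2" indexing below),
                 * while the CQ ring fits it into its last 16-byte CQE
                 * ("* i - 1").
                 */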
4281                 /* SGE ring */
4282                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4283                         struct eth_rx_sge *sge;
4284
4285                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4286                         sge->addr_hi =
4287                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4288                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4289                         sge->addr_lo =
4290                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4291                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4292                 }
4293
4294                 bnx2x_init_sge_ring_bit_mask(fp);
4295
4296                 /* RX BD ring */
4297                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4298                         struct eth_rx_bd *rx_bd;
4299
4300                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4301                         rx_bd->addr_hi =
4302                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4303                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4304                         rx_bd->addr_lo =
4305                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4306                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4307                 }
4308
4309                 /* CQ ring */
4310                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4311                         struct eth_rx_cqe_next_page *nextpg;
4312
4313                         nextpg = (struct eth_rx_cqe_next_page *)
4314                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4315                         nextpg->addr_hi =
4316                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4317                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4318                         nextpg->addr_lo =
4319                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4320                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4321                 }
4322
4323                 /* Allocate SGEs and initialize the ring elements */
4324                 for (i = 0, ring_prod = 0;
4325                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4326
4327                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4328                                 BNX2X_ERR("was only able to allocate "
4329                                           "%d rx sges\n", i);
4330                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4331                                 /* Cleanup already allocated elements */
4332                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4333                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4334                                 fp->disable_tpa = 1;
4335                                 ring_prod = 0;
4336                                 break;
4337                         }
4338                         ring_prod = NEXT_SGE_IDX(ring_prod);
4339                 }
4340                 fp->rx_sge_prod = ring_prod;
4341
4342                 /* Allocate BDs and initialize BD ring */
4343                 fp->rx_comp_cons = 0;
4344                 cqe_ring_prod = ring_prod = 0;
4345                 for (i = 0; i < bp->rx_ring_size; i++) {
4346                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4347                                 BNX2X_ERR("was only able to allocate "
4348                                           "%d rx skbs\n", i);
4349                                 bp->eth_stats.rx_skb_alloc_failed++;
4350                                 break;
4351                         }
4352                         ring_prod = NEXT_RX_IDX(ring_prod);
4353                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4354                         WARN_ON(ring_prod <= i);
4355                 }
4356
4357                 fp->rx_bd_prod = ring_prod;
4358                 /* must not have more available CQEs than BDs */
4359                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4360                                        cqe_ring_prod);
4361                 fp->rx_pkt = fp->rx_calls = 0;
4362
4363                 /* Warning!
4364                  * This will generate an interrupt (to the TSTORM);
4365                  * it must only be done after the chip is initialized.
4366                  */
4367                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4368                                      fp->rx_sge_prod);
4369                 if (j != 0)
4370                         continue;
4371
4372                 REG_WR(bp, BAR_USTRORM_INTMEM +
4373                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4374                        U64_LO(fp->rx_comp_mapping));
4375                 REG_WR(bp, BAR_USTRORM_INTMEM +
4376                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4377                        U64_HI(fp->rx_comp_mapping));
4378         }
4379 }
4380
4381 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4382 {
4383         int i, j;
4384
4385         for_each_queue(bp, j) {
4386                 struct bnx2x_fastpath *fp = &bp->fp[j];
4387
4388                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4389                         struct eth_tx_bd *tx_bd =
4390                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4391
4392                         tx_bd->addr_hi =
4393                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4394                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4395                         tx_bd->addr_lo =
4396                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4397                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4398                 }
4399
4400                 fp->tx_pkt_prod = 0;
4401                 fp->tx_pkt_cons = 0;
4402                 fp->tx_bd_prod = 0;
4403                 fp->tx_bd_cons = 0;
4404                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4405                 fp->tx_pkt = 0;
4406         }
4407 }
4408
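/* The slow-path (SP) queue is a single ring of commands handed to the
 * XSTORM: the driver tracks free entries with spq_left and notifies the
 * firmware of new commands by writing the producer index.
 */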
4409 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4410 {
4411         int func = BP_FUNC(bp);
4412
4413         spin_lock_init(&bp->spq_lock);
4414
4415         bp->spq_left = MAX_SPQ_PENDING;
4416         bp->spq_prod_idx = 0;
4417         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4418         bp->spq_prod_bd = bp->spq;
4419         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4420
4421         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4422                U64_LO(bp->spq_mapping));
4423         REG_WR(bp,
4424                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4425                U64_HI(bp->spq_mapping));
4426
4427         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4428                bp->spq_prod_idx);
4429 }
4430
4431 static void bnx2x_init_context(struct bnx2x *bp)
4432 {
4433         int i;
4434
4435         for_each_queue(bp, i) {
4436                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4437                 struct bnx2x_fastpath *fp = &bp->fp[i];
4438                 u8 sb_id = FP_SB_ID(fp);
4439
4440                 context->xstorm_st_context.tx_bd_page_base_hi =
4441                                                 U64_HI(fp->tx_desc_mapping);
4442                 context->xstorm_st_context.tx_bd_page_base_lo =
4443                                                 U64_LO(fp->tx_desc_mapping);
4444                 context->xstorm_st_context.db_data_addr_hi =
4445                                                 U64_HI(fp->tx_prods_mapping);
4446                 context->xstorm_st_context.db_data_addr_lo =
4447                                                 U64_LO(fp->tx_prods_mapping);
4448                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4449                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4450
4451                 context->ustorm_st_context.common.sb_index_numbers =
4452                                                 BNX2X_RX_SB_INDEX_NUM;
4453                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4454                 context->ustorm_st_context.common.status_block_id = sb_id;
4455                 context->ustorm_st_context.common.flags =
4456                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4457                 context->ustorm_st_context.common.mc_alignment_size = 64;
4458                 context->ustorm_st_context.common.bd_buff_size =
4459                                                 bp->rx_buf_use_size;
4460                 context->ustorm_st_context.common.bd_page_base_hi =
4461                                                 U64_HI(fp->rx_desc_mapping);
4462                 context->ustorm_st_context.common.bd_page_base_lo =
4463                                                 U64_LO(fp->rx_desc_mapping);
4464                 if (!fp->disable_tpa) {
4465                         context->ustorm_st_context.common.flags |=
4466                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4467                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4468                         context->ustorm_st_context.common.sge_buff_size =
4469                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4470                         context->ustorm_st_context.common.sge_page_base_hi =
4471                                                 U64_HI(fp->rx_sge_mapping);
4472                         context->ustorm_st_context.common.sge_page_base_lo =
4473                                                 U64_LO(fp->rx_sge_mapping);
4474                 }
4475
4476                 context->cstorm_st_context.sb_index_number =
4477                                                 C_SB_ETH_TX_CQ_INDEX;
4478                 context->cstorm_st_context.status_block_id = sb_id;
4479
4480                 context->xstorm_ag_context.cdu_reserved =
4481                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4482                                                CDU_REGION_NUMBER_XCM_AG,
4483                                                ETH_CONNECTION_TYPE);
4484                 context->ustorm_ag_context.cdu_usage =
4485                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4486                                                CDU_REGION_NUMBER_UCM_AG,
4487                                                ETH_CONNECTION_TYPE);
4488         }
4489 }
4490
4491 static void bnx2x_init_ind_table(struct bnx2x *bp)
4492 {
4493         int port = BP_PORT(bp);
4494         int i;
4495
4496         if (!is_multi(bp))
4497                 return;
4498
4499         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
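        /* Fill the TSTORM indirection table round-robin over the active
         * queues so RSS spreads incoming flows evenly across them.
         */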
4500         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4501                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4502                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4503                         i % bp->num_queues);
4504
4505         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4506 }
4507
4508 static void bnx2x_set_client_config(struct bnx2x *bp)
4509 {
4510         struct tstorm_eth_client_config tstorm_client = {0};
4511         int port = BP_PORT(bp);
4512         int i;
4513
4514         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4515         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4516         tstorm_client.config_flags =
4517                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4518 #ifdef BCM_VLAN
4519         if (bp->rx_mode && bp->vlgrp) {
4520                 tstorm_client.config_flags |=
4521                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4522                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4523         }
4524 #endif
4525
4526         if (bp->flags & TPA_ENABLE_FLAG) {
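                /* Worst-case SGE count per aggregated packet: pages needed
                 * for one MTU, rounded up to a whole number of SGEs (each
                 * SGE spans PAGES_PER_SGE pages).
                 */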
4527                 tstorm_client.max_sges_for_packet =
4528                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4529                 tstorm_client.max_sges_for_packet =
4530                         ((tstorm_client.max_sges_for_packet +
4531                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4532                         PAGES_PER_SGE_SHIFT;
4533
4534                 tstorm_client.config_flags |=
4535                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4536         }
4537
4538         for_each_queue(bp, i) {
4539                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4540                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4541                        ((u32 *)&tstorm_client)[0]);
4542                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4543                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4544                        ((u32 *)&tstorm_client)[1]);
4545         }
4546
4547         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4548            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4549 }
4550
4551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4552 {
4553         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4554         int mode = bp->rx_mode;
4555         int mask = (1 << BP_L_ID(bp));
4556         int func = BP_FUNC(bp);
4557         int i;
4558
4559         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4560
4561         switch (mode) {
4562         case BNX2X_RX_MODE_NONE: /* no Rx */
4563                 tstorm_mac_filter.ucast_drop_all = mask;
4564                 tstorm_mac_filter.mcast_drop_all = mask;
4565                 tstorm_mac_filter.bcast_drop_all = mask;
4566                 break;
4567         case BNX2X_RX_MODE_NORMAL:
4568                 tstorm_mac_filter.bcast_accept_all = mask;
4569                 break;
4570         case BNX2X_RX_MODE_ALLMULTI:
4571                 tstorm_mac_filter.mcast_accept_all = mask;
4572                 tstorm_mac_filter.bcast_accept_all = mask;
4573                 break;
4574         case BNX2X_RX_MODE_PROMISC:
4575                 tstorm_mac_filter.ucast_accept_all = mask;
4576                 tstorm_mac_filter.mcast_accept_all = mask;
4577                 tstorm_mac_filter.bcast_accept_all = mask;
4578                 break;
4579         default:
4580                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4581                 break;
4582         }
4583
4584         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4585                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4587                        ((u32 *)&tstorm_mac_filter)[i]);
4588
4589 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4590                    ((u32 *)&tstorm_mac_filter)[i]); */
4591         }
4592
4593         if (mode != BNX2X_RX_MODE_NONE)
4594                 bnx2x_set_client_config(bp);
4595 }
4596
4597 static void bnx2x_init_internal_common(struct bnx2x *bp)
4598 {
4599         int i;
4600
4601         /* Zero this manually as its initialization is
4602            currently missing in the initTool */
4603         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4604                 REG_WR(bp, BAR_USTRORM_INTMEM +
4605                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4606 }
4607
4608 static void bnx2x_init_internal_port(struct bnx2x *bp)
4609 {
4610         int port = BP_PORT(bp);
4611
4612         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4613         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4614         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4615         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4616 }
4617
4618 static void bnx2x_init_internal_func(struct bnx2x *bp)
4619 {
4620         struct tstorm_eth_function_common_config tstorm_config = {0};
4621         struct stats_indication_flags stats_flags = {0};
4622         int port = BP_PORT(bp);
4623         int func = BP_FUNC(bp);
4624         int i;
4625         u16 max_agg_size;
4626
4627         if (is_multi(bp)) {
4628                 tstorm_config.config_flags = MULTI_FLAGS;
4629                 tstorm_config.rss_result_mask = MULTI_MASK;
4630         }
4631
4632         tstorm_config.leading_client_id = BP_L_ID(bp);
4633
4634         REG_WR(bp, BAR_TSTRORM_INTMEM +
4635                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4636                (*(u32 *)&tstorm_config));
4637
4638         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4639         bnx2x_set_storm_rx_mode(bp);
4640
4641         /* reset xstorm per client statistics */
4642         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4643                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4644                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4645                        i*4, 0);
4646         }
4647         /* reset tstorm per client statistics */
4648         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4649                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4650                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4651                        i*4, 0);
4652         }
4653
4654         /* Init statistics related context */
4655         stats_flags.collect_eth = 1;
4656
4657         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4658                ((u32 *)&stats_flags)[0]);
4659         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660                ((u32 *)&stats_flags)[1]);
4661
4662         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4663                ((u32 *)&stats_flags)[0]);
4664         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4665                ((u32 *)&stats_flags)[1]);
4666
4667         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4668                ((u32 *)&stats_flags)[0]);
4669         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4670                ((u32 *)&stats_flags)[1]);
4671
4672         REG_WR(bp, BAR_XSTRORM_INTMEM +
4673                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4674                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4675         REG_WR(bp, BAR_XSTRORM_INTMEM +
4676                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4677                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4678
4679         REG_WR(bp, BAR_TSTRORM_INTMEM +
4680                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682         REG_WR(bp, BAR_TSTRORM_INTMEM +
4683                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4685
4686         if (CHIP_IS_E1H(bp)) {
4687                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4688                         IS_E1HMF(bp));
4689                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4690                         IS_E1HMF(bp));
4691                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4692                         IS_E1HMF(bp));
4693                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4694                         IS_E1HMF(bp));
4695
4696                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4697                          bp->e1hov);
4698         }
4699
4700         /* Init CQ ring mapping and aggregation size */
4701         max_agg_size = min((u32)(bp->rx_buf_use_size +
4702                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4703                            (u32)0xffff);
4704         for_each_queue(bp, i) {
4705                 struct bnx2x_fastpath *fp = &bp->fp[i];
4706
4707                 REG_WR(bp, BAR_USTRORM_INTMEM +
4708                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4709                        U64_LO(fp->rx_comp_mapping));
4710                 REG_WR(bp, BAR_USTRORM_INTMEM +
4711                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4712                        U64_HI(fp->rx_comp_mapping));
4713
4714                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4715                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4716                          max_agg_size);
4717         }
4718 }
4719
4720 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4721 {
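        /* The cases below intentionally fall through: a COMMON load also
         * runs the PORT and FUNCTION stages, and a PORT load also runs
         * the FUNCTION stage.
         */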
4722         switch (load_code) {
4723         case FW_MSG_CODE_DRV_LOAD_COMMON:
4724                 bnx2x_init_internal_common(bp);
4725                 /* no break */
4726
4727         case FW_MSG_CODE_DRV_LOAD_PORT:
4728                 bnx2x_init_internal_port(bp);
4729                 /* no break */
4730
4731         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4732                 bnx2x_init_internal_func(bp);
4733                 break;
4734
4735         default:
4736                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4737                 break;
4738         }
4739 }
4740
4741 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4742 {
4743         int i;
4744
4745         for_each_queue(bp, i) {
4746                 struct bnx2x_fastpath *fp = &bp->fp[i];
4747
4748                 fp->bp = bp;
4749                 fp->state = BNX2X_FP_STATE_CLOSED;
4750                 fp->index = i;
4751                 fp->cl_id = BP_L_ID(bp) + i;
4752                 fp->sb_id = fp->cl_id;
4753                 DP(NETIF_MSG_IFUP,
4754                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4755                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4756                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4757                               FP_SB_ID(fp));
4758                 bnx2x_update_fpsb_idx(fp);
4759         }
4760
4761         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4762                           DEF_SB_ID);
4763         bnx2x_update_dsb_idx(bp);
4764         bnx2x_update_coalesce(bp);
4765         bnx2x_init_rx_rings(bp);
4766         bnx2x_init_tx_ring(bp);
4767         bnx2x_init_sp_ring(bp);
4768         bnx2x_init_context(bp);
4769         bnx2x_init_internal(bp, load_code);
4770         bnx2x_init_ind_table(bp);
4771         bnx2x_int_enable(bp);
4772 }
4773
4774 /* end of nic init */
4775
4776 /*
4777  * gzip service functions
4778  */
4779
4780 static int bnx2x_gunzip_init(struct bnx2x *bp)
4781 {
4782         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4783                                               &bp->gunzip_mapping);
4784         if (bp->gunzip_buf == NULL)
4785                 goto gunzip_nomem1;
4786
4787         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4788         if (bp->strm == NULL)
4789                 goto gunzip_nomem2;
4790
4791         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4792                                       GFP_KERNEL);
4793         if (bp->strm->workspace == NULL)
4794                 goto gunzip_nomem3;
4795
4796         return 0;
4797
4798 gunzip_nomem3:
4799         kfree(bp->strm);
4800         bp->strm = NULL;
4801
4802 gunzip_nomem2:
4803         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4804                             bp->gunzip_mapping);
4805         bp->gunzip_buf = NULL;
4806
4807 gunzip_nomem1:
4808         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4809                " decompression\n", bp->dev->name);
4810         return -ENOMEM;
4811 }
4812
4813 static void bnx2x_gunzip_end(struct bnx2x *bp)
4814 {
4815         kfree(bp->strm->workspace);
4816
4817         kfree(bp->strm);
4818         bp->strm = NULL;
4819
4820         if (bp->gunzip_buf) {
4821                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4822                                     bp->gunzip_mapping);
4823                 bp->gunzip_buf = NULL;
4824         }
4825 }
4826
4827 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4828 {
4829         int n, rc;
4830
4831         /* check gzip header */
4832         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4833                 return -EINVAL;
4834
4835         n = 10;
4836
4837 #define FNAME                           0x8
4838
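        /* Per RFC 1952 the fixed gzip header is 10 bytes; if the FNAME
         * flag (bit 3 of the FLG byte) is set, a NUL-terminated original
         * file name follows and must be skipped before the deflate data.
         */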
4839         if (zbuf[3] & FNAME)
4840                 while ((zbuf[n++] != 0) && (n < len));
4841
4842         bp->strm->next_in = zbuf + n;
4843         bp->strm->avail_in = len - n;
4844         bp->strm->next_out = bp->gunzip_buf;
4845         bp->strm->avail_out = FW_BUF_SIZE;
4846
4847         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4848         if (rc != Z_OK)
4849                 return rc;
4850
4851         rc = zlib_inflate(bp->strm, Z_FINISH);
4852         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4853                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4854                        bp->dev->name, bp->strm->msg);
4855
4856         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4857         if (bp->gunzip_outlen & 0x3)
4858                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4859                                     " gunzip_outlen (%d) not aligned\n",
4860                        bp->dev->name, bp->gunzip_outlen);
4861         bp->gunzip_outlen >>= 2;
4862
4863         zlib_inflateEnd(bp->strm);
4864
4865         if (rc == Z_STREAM_END)
4866                 return 0;
4867
4868         return rc;
4869 }
4870
4871 /* nic load/unload */
4872
4873 /*
4874  * General service functions
4875  */
4876
4877 /* send a NIG loopback debug packet */
4878 static void bnx2x_lb_pckt(struct bnx2x *bp)
4879 {
4880         u32 wb_write[3];
4881
4882         /* Ethernet source and destination addresses */
4883         wb_write[0] = 0x55555555;
4884         wb_write[1] = 0x55555555;
4885         wb_write[2] = 0x20;             /* SOP */
4886         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4887
4888         /* NON-IP protocol */
4889         wb_write[0] = 0x09000000;
4890         wb_write[1] = 0x55555555;
4891         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4892         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4893 }
4894
4895 /* Some of the internal memories are not directly readable from the
4896  * driver, so to test them we send debug packets through the NIG
4897  * loopback and watch the block packet counters.
4898  */
4899 static int bnx2x_int_mem_test(struct bnx2x *bp)
4900 {
4901         int factor;
4902         int count, i;
4903         u32 val = 0;
4904
4905         if (CHIP_REV_IS_FPGA(bp))
4906                 factor = 120;
4907         else if (CHIP_REV_IS_EMUL(bp))
4908                 factor = 200;
4909         else
4910                 factor = 1;
4911
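        /* Emulation and FPGA platforms run much slower than real silicon,
         * so the polling timeouts below are scaled up accordingly.
         */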
4912         DP(NETIF_MSG_HW, "start part1\n");
4913
4914         /* Disable inputs of parser neighbor blocks */
4915         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4916         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4917         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4918         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4919
4920         /*  Write 0 to parser credits for CFC search request */
4921         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4922
4923         /* send Ethernet packet */
4924         bnx2x_lb_pckt(bp);
4925
4926         /* TODO: do we need to reset the NIG statistics first? */
4927         /* Wait until NIG register shows 1 packet of size 0x10 */
4928         count = 1000 * factor;
4929         while (count) {
4930
4931                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4932                 val = *bnx2x_sp(bp, wb_data[0]);
4933                 if (val == 0x10)
4934                         break;
4935
4936                 msleep(10);
4937                 count--;
4938         }
4939         if (val != 0x10) {
4940                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4941                 return -1;
4942         }
4943
4944         /* Wait until PRS register shows 1 packet */
4945         count = 1000 * factor;
4946         while (count) {
4947                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4948                 if (val == 1)
4949                         break;
4950
4951                 msleep(10);
4952                 count--;
4953         }
4954         if (val != 0x1) {
4955                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4956                 return -2;
4957         }
4958
4959         /* Reset and init BRB, PRS */
4960         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4961         msleep(50);
4962         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4963         msleep(50);
4964         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4965         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4966
4967         DP(NETIF_MSG_HW, "part2\n");
4968
4969         /* Disable inputs of parser neighbor blocks */
4970         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4971         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4972         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4973         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4974
4975         /* Write 0 to parser credits for CFC search request */
4976         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4977
4978         /* send 10 Ethernet packets */
4979         for (i = 0; i < 10; i++)
4980                 bnx2x_lb_pckt(bp);
4981
4982         /* Wait until the NIG register shows 10 + 1 packets
4983            with a total size of 11*0x10 = 0xb0 */
4984         count = 1000 * factor;
4985         while (count) {
4986
4987                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4988                 val = *bnx2x_sp(bp, wb_data[0]);
4989                 if (val == 0xb0)
4990                         break;
4991
4992                 msleep(10);
4993                 count--;
4994         }
4995         if (val != 0xb0) {
4996                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4997                 return -3;
4998         }
4999
5000         /* The PRS register should now show 2 packets */
5001         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5002         if (val != 2)
5003                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5004
5005         /* Write 1 to parser credits for CFC search request */
5006         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5007
5008         /* Wait until PRS register shows 3 packets */
5009         msleep(10 * factor);
5010
5011         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012         if (val != 3)
5013                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5014
5015         /* clear NIG EOP FIFO */
5016         for (i = 0; i < 11; i++)
5017                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5018         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5019         if (val != 1) {
5020                 BNX2X_ERR("clear of NIG failed\n");
5021                 return -4;
5022         }
5023
5024         /* Reset and init BRB, PRS, NIG */
5025         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5026         msleep(50);
5027         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5028         msleep(50);
5029         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5030         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5031 #ifndef BCM_ISCSI
5032         /* set NIC mode */
5033         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5034 #endif
5035
5036         /* Enable inputs of parser neighbor blocks */
5037         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5038         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5039         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5040         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5041
5042         DP(NETIF_MSG_HW, "done\n");
5043
5044         return 0; /* OK */
5045 }
5046
5047 static void enable_blocks_attention(struct bnx2x *bp)
5048 {
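        /* A set bit in a block's INT_MASK register masks that attention
         * source, so writing 0 enables all attentions for the block
         * (PBF below keeps bits 3 and 4 masked with 0x18).
         */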
5049         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5050         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5051         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5052         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5053         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5054         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5055         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5056         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5057         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5058 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5059 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5060         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5061         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5062         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5063 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5064 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5065         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5066         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5067         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5068         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5069 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5070 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5071         if (CHIP_REV_IS_FPGA(bp))
5072                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5073         else
5074                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5075         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5076         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5077         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5078 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5079 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5080         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5081         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5082 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5083         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5084 }
5085
5086
5087 static int bnx2x_init_common(struct bnx2x *bp)
5088 {
5089         u32 val, i;
5090
5091         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5092
5093         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5094         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5095
5096         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5097         if (CHIP_IS_E1H(bp))
5098                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5099
5100         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5101         msleep(30);
5102         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5103
5104         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5105         if (CHIP_IS_E1(bp)) {
5106                 /* enable the HW interrupt from PXP on USDM overflow
5107                    (bit 16 of INT_MASK_0) */
5108                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5109         }
5110
5111         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5112         bnx2x_init_pxp(bp);
5113
5114 #ifdef __BIG_ENDIAN
5115         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5116         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5117         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5118         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5119         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5120         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5121
5122 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5123         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5124         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5125         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5126         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5127 #endif
5128
5129 #ifndef BCM_ISCSI
5130         /* set NIC mode */
5131         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5132 #endif
5133
5134         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5135 #ifdef BCM_ISCSI
5136         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5137         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5138         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5139 #endif
5140
5141         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5142                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5143
5144         /* let the HW do its magic ... */
5145         msleep(100);
5146         /* finish PXP init */
5147         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5148         if (val != 1) {
5149                 BNX2X_ERR("PXP2 CFG failed\n");
5150                 return -EBUSY;
5151         }
5152         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5153         if (val != 1) {
5154                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5155                 return -EBUSY;
5156         }
5157
5158         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5159         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5160
5161         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5162
5163         /* clean the DMAE memory */
5164         bp->dmae_ready = 1;
5165         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5166
5167         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5168         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5169         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5170         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5171
5172         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5173         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5174         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5175         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5176
5177         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5178         /* soft reset pulse */
5179         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5180         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5181
5182 #ifdef BCM_ISCSI
5183         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5184 #endif
5185
5186         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5187         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5188         if (!CHIP_REV_IS_SLOW(bp)) {
5189                 /* enable hw interrupt from doorbell Q */
5190                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5191         }
5192
5193         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5194         if (CHIP_REV_IS_SLOW(bp)) {
5195                 /* fix for emulation and FPGA: run with pause disabled */
5196                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5197                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5198                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5199                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5200         }
5201
5202         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5203         if (CHIP_IS_E1H(bp))
5204                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5205
5206         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5207         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5208         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5209         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5210
5211         if (CHIP_IS_E1H(bp)) {
5212                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5213                                 STORM_INTMEM_SIZE_E1H/2);
5214                 bnx2x_init_fill(bp,
5215                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5216                                 0, STORM_INTMEM_SIZE_E1H/2);
5217                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5218                                 STORM_INTMEM_SIZE_E1H/2);
5219                 bnx2x_init_fill(bp,
5220                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221                                 0, STORM_INTMEM_SIZE_E1H/2);
5222                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5223                                 STORM_INTMEM_SIZE_E1H/2);
5224                 bnx2x_init_fill(bp,
5225                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226                                 0, STORM_INTMEM_SIZE_E1H/2);
5227                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5228                                 STORM_INTMEM_SIZE_E1H/2);
5229                 bnx2x_init_fill(bp,
5230                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231                                 0, STORM_INTMEM_SIZE_E1H/2);
5232         } else { /* E1 */
5233                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5234                                 STORM_INTMEM_SIZE_E1);
5235                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5236                                 STORM_INTMEM_SIZE_E1);
5237                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5238                                 STORM_INTMEM_SIZE_E1);
5239                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5240                                 STORM_INTMEM_SIZE_E1);
5241         }
5242
5243         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5244         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5245         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5246         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5247
5248         /* sync semi rtc */
5249         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5250                0x80000000);
5251         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5252                0x80000000);
5253
5254         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5255         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5256         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5257
5258         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5259         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5260                 REG_WR(bp, i, 0xc0cac01a);
5261                 /* TODO: replace with something meaningful */
5262         }
5263         if (CHIP_IS_E1H(bp))
5264                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5265         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5266
5267         if (sizeof(union cdu_context) != 1024)
5268                 /* we currently assume that a context is 1024 bytes */
5269                 printk(KERN_ALERT PFX "please adjust the size of"
5270                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5271
5272         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5273         val = (4 << 24) + (0 << 12) + 1024;
5274         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5275         if (CHIP_IS_E1(bp)) {
5276                 /* !!! fix pxp client credit until excel update */
5277                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5278                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5279         }
5280
5281         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5282         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5283
5284         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5285         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5286
5287         /* PXPCS COMMON comes here */
5288         /* Reset PCIE errors for debug */
5289         REG_WR(bp, 0x2814, 0xffffffff);
5290         REG_WR(bp, 0x3820, 0xffffffff);
5291
5292         /* EMAC0 COMMON comes here */
5293         /* EMAC1 COMMON comes here */
5294         /* DBU COMMON comes here */
5295         /* DBG COMMON comes here */
5296
5297         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5298         if (CHIP_IS_E1H(bp)) {
5299                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5300                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5301         }
5302
5303         if (CHIP_REV_IS_SLOW(bp))
5304                 msleep(200);
5305
5306         /* finish CFC init */
5307         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5308         if (val != 1) {
5309                 BNX2X_ERR("CFC LL_INIT failed\n");
5310                 return -EBUSY;
5311         }
5312         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5313         if (val != 1) {
5314                 BNX2X_ERR("CFC AC_INIT failed\n");
5315                 return -EBUSY;
5316         }
5317         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5318         if (val != 1) {
5319                 BNX2X_ERR("CFC CAM_INIT failed\n");
5320                 return -EBUSY;
5321         }
5322         REG_WR(bp, CFC_REG_DEBUG0, 0);
5323
5324         /* read the NIG statistic
5325            to see if this is the first load since power-up */
5326         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5327         val = *bnx2x_sp(bp, wb_data[0]);
5328
5329         /* do internal memory self test */
5330         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5331                 BNX2X_ERR("internal mem self test failed\n");
5332                 return -EBUSY;
5333         }
5334
5335         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5336         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5337                 /* Fan failure is indicated by SPIO 5 */
5338                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5339                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5340
5341                 /* set to active low mode */
5342                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5343                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5344                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5345                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5346
5347                 /* enable interrupt to signal the IGU */
5348                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5349                 val |= (1 << MISC_REGISTERS_SPIO_5);
5350                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5351                 break;
5352
5353         default:
5354                 break;
5355         }
5356
5357         /* clear PXP2 attentions */
5358         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5359
5360         enable_blocks_attention(bp);
5361
5362         if (bp->flags & TPA_ENABLE_FLAG) {
5363                 struct tstorm_eth_tpa_exist tmp = {0};
5364
5365                 tmp.tpa_exist = 1;
5366
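                /* copy the struct into TSTORM internal memory one 32-bit
                 * word at a time so the firmware sees TPA as enabled
                 */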
5367                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5368                        ((u32 *)&tmp)[0]);
5369                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5370                        ((u32 *)&tmp)[1]);
5371         }
5372
5373         return 0;
5374 }
5375
5376 static int bnx2x_init_port(struct bnx2x *bp)
5377 {
5378         int port = BP_PORT(bp);
5379         u32 val;
#ifdef BCM_ISCSI
        /* the iSCSI blocks below use these; i starts at this port's ILT
         * base (384 lines per port, inferred from the Port0/Port1 line
         * numbers in the comments below)
         */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5380
5381         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5382
5383         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5384
5385         /* Port PXP comes here */
5386         /* Port PXP2 comes here */
5387 #ifdef BCM_ISCSI
5388         /* Port0  1
5389          * Port1  385 */
5390         i++;
5391         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5392         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5393         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5394         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5395
5396         /* Port0  2
5397          * Port1  386 */
5398         i++;
5399         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5400         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5401         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5402         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5403
5404         /* Port0  3
5405          * Port1  387 */
5406         i++;
5407         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5408         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5409         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5410         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5411 #endif
5412         /* Port CMs come here */
5413
5414         /* Port QM comes here */
5415 #ifdef BCM_ISCSI
5416         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5417         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5418
5419         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5420                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5421 #endif
5422         /* Port DQ comes here */
5423         /* Port BRB1 comes here */
5424         /* Port PRS comes here */
5425         /* Port TSDM comes here */
5426         /* Port CSDM comes here */
5427         /* Port USDM comes here */
5428         /* Port XSDM comes here */
5429         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5430                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5431         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5432                              port ? USEM_PORT1_END : USEM_PORT0_END);
5433         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5434                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5435         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5436                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5437         /* Port UPB comes here */
5438         /* Port XPB comes here */
5439
5440         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5441                              port ? PBF_PORT1_END : PBF_PORT0_END);
5442
5443         /* configure PBF to work without PAUSE, MTU 9000 */
5444         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5445
5446         /* update threshold */
5447         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5448         /* update init credit */
5449         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5450
5451         /* probe changes */
5452         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5453         msleep(5);
5454         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5455
5456 #ifdef BCM_ISCSI
5457         /* tell the searcher where the T2 table is */
5458         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5459
5460         wb_write[0] = U64_LO(bp->t2_mapping);
5461         wb_write[1] = U64_HI(bp->t2_mapping);
5462         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5463         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5464         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5465         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5466
5467         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5468         /* Port SRCH comes here */
5469 #endif
5470         /* Port CDU comes here */
5471         /* Port CFC comes here */
5472
5473         if (CHIP_IS_E1(bp)) {
5474                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5475                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5476         }
5477         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5478                              port ? HC_PORT1_END : HC_PORT0_END);
5479
5480         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5481                                     MISC_AEU_PORT0_START,
5482                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5483         /* init aeu_mask_attn_func_0/1:
5484          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5485          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5486          *             bits 4-7 are used for "per vn group attention" */
5487         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5488                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5489
5490         /* Port PXPCS comes here */
5491         /* Port EMAC0 comes here */
5492         /* Port EMAC1 comes here */
5493         /* Port DBU comes here */
5494         /* Port DBG comes here */
5495         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5496                              port ? NIG_PORT1_END : NIG_PORT0_END);
5497
5498         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5499
5500         if (CHIP_IS_E1H(bp)) {
5501                 u32 wsum;
5502                 struct cmng_struct_per_port m_cmng_port;
5503                 int vn;
5504
5505                 /* 0x2 disable e1hov, 0x1 enable */
5506                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5507                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5508
5509                 /* Init RATE SHAPING and FAIRNESS contexts.
5510                    Initialize as if there is a 10G link. */
5511                 wsum = bnx2x_calc_vn_wsum(bp);
5512                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5513                 if (IS_E1HMF(bp))
5514                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5515                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5516                                         wsum, 10000, &m_cmng_port);
5517         }
5518
5519         /* Port MCP comes here */
5520         /* Port DMAE comes here */
5521
5522         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5523         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5524                 /* add SPIO 5 to group 0 */
5525                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5526                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5527                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5528                 break;
5529
5530         default:
5531                 break;
5532         }
5533
5534         bnx2x__link_reset(bp);
5535
5536         return 0;
5537 }
5538
5539 #define ILT_PER_FUNC            (768/2)
5540 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5541 /* the phys address is shifted right 12 bits and a 1=valid bit
5542    is added at the 53rd bit;
5543    since this is a wide register(TM)
5544    we split it into two 32-bit writes
5545  */
5546 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5547 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
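/* an ILT line range is encoded into a single register value: the last
   line in bits 10 and up, the first line in the low 10 bits;
   PXP_ONE_ILT() is the degenerate single-line range */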
5548 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5549 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
5550
5551 #define CNIC_ILT_LINES          0
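/* no extra ILT lines are reserved for CNIC in this version */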
5552
5553 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5554 {
5555         int reg;
5556
5557         if (CHIP_IS_E1H(bp))
5558                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5559         else /* E1 */
5560                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5561
5562         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5563 }
5564
5565 static int bnx2x_init_func(struct bnx2x *bp)
5566 {
5567         int port = BP_PORT(bp);
5568         int func = BP_FUNC(bp);
5569         int i;
5570
5571         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5572
5573         i = FUNC_ILT_BASE(func);
5574
5575         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5576         if (CHIP_IS_E1H(bp)) {
5577                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5578                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5579         } else /* E1 */
5580                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5581                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5582
5583
5584         if (CHIP_IS_E1H(bp)) {
5585                 for (i = 0; i < 9; i++)
5586                         bnx2x_init_block(bp,
5587                                          cm_start[func][i], cm_end[func][i]);
5588
5589                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5590                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5591         }
5592
5593         /* HC init per function */
5594         if (CHIP_IS_E1H(bp)) {
5595                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5596
5597                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5598                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5599         }
5600         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5601
5602         if (CHIP_IS_E1H(bp))
5603                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5604
5605         /* Reset PCIE errors for debug */
5606         REG_WR(bp, 0x2114, 0xffffffff);
5607         REG_WR(bp, 0x2120, 0xffffffff);
5608
5609         return 0;
5610 }
5611
5612 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5613 {
5614         int i, rc = 0;
5615
5616         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5617            BP_FUNC(bp), load_code);
5618
5619         bp->dmae_ready = 0;
5620         mutex_init(&bp->dmae_mutex);
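        /* with dmae_ready still 0, writes that would normally go through
         * DMAE presumably fall back to slower indirect access until the
         * flag is set below
         */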
5621         bnx2x_gunzip_init(bp);
5622
5623         switch (load_code) {
5624         case FW_MSG_CODE_DRV_LOAD_COMMON:
5625                 rc = bnx2x_init_common(bp);
5626                 if (rc)
5627                         goto init_hw_err;
5628                 /* no break */
5629
5630         case FW_MSG_CODE_DRV_LOAD_PORT:
5631                 bp->dmae_ready = 1;
5632                 rc = bnx2x_init_port(bp);
5633                 if (rc)
5634                         goto init_hw_err;
5635                 /* no break */
5636
5637         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5638                 bp->dmae_ready = 1;
5639                 rc = bnx2x_init_func(bp);
5640                 if (rc)
5641                         goto init_hw_err;
5642                 break;
5643
5644         default:
5645                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5646                 break;
5647         }
5648
5649         if (!BP_NOMCP(bp)) {
5650                 int func = BP_FUNC(bp);
5651
5652                 bp->fw_drv_pulse_wr_seq =
5653                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5654                                  DRV_PULSE_SEQ_MASK);
5655                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5656                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5657                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5658         } else
5659                 bp->func_stx = 0;
5660
5661         /* this needs to be done before gunzip end */
5662         bnx2x_zero_def_sb(bp);
5663         for_each_queue(bp, i)
5664                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5665
5666 init_hw_err:
5667         bnx2x_gunzip_end(bp);
5668
5669         return rc;
5670 }
5671
5672 /* send the MCP a request, block until there is a reply */
5673 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5674 {
5675         int func = BP_FUNC(bp);
5676         u32 seq = ++bp->fw_seq;
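        /* the sequence number rides in the low bits of the command word;
         * the FW echoes it back in fw_mb_header so the reply can be matched
         */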
5677         u32 rc = 0;
5678         u32 cnt = 1;
5679         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
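        /* poll interval in ms; slow chip revs (presumably emulation/FPGA)
         * get a 10x longer tick
         */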
5680
5681         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5682         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5683
5684         do {
5685                 /* let the FW do its magic ... */
5686                 msleep(delay);
5687
5688                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5689
5690                 /* Give the FW up to 2 seconds (200*10ms) */
5691         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5692
5693         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5694            cnt*delay, rc, seq);
5695
5696         /* is this a reply to our command? */
5697         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5698                 rc &= FW_MSG_CODE_MASK;
5699
5700         } else {
5701                 /* FW BUG! */
5702                 BNX2X_ERR("FW failed to respond!\n");
5703                 bnx2x_fw_dump(bp);
5704                 rc = 0;
5705         }
5706
5707         return rc;
5708 }
5709
5710 static void bnx2x_free_mem(struct bnx2x *bp)
5711 {
5712
5713 #define BNX2X_PCI_FREE(x, y, size) \
5714         do { \
5715                 if (x) { \
5716                         pci_free_consistent(bp->pdev, size, x, y); \
5717                         x = NULL; \
5718                         y = 0; \
5719                 } \
5720         } while (0)
5721
5722 #define BNX2X_FREE(x) \
5723         do { \
5724                 if (x) { \
5725                         vfree(x); \
5726                         x = NULL; \
5727                 } \
5728         } while (0)
5729
5730         int i;
5731
5732         /* fastpath */
5733         for_each_queue(bp, i) {
5734
5735                 /* Status blocks */
5736                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5737                                bnx2x_fp(bp, i, status_blk_mapping),
5738                                sizeof(struct host_status_block) +
5739                                sizeof(struct eth_tx_db_data));
5740
5741                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5742                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5743                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5744                                bnx2x_fp(bp, i, tx_desc_mapping),
5745                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5746
5747                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5748                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5749                                bnx2x_fp(bp, i, rx_desc_mapping),
5750                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5751
5752                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5753                                bnx2x_fp(bp, i, rx_comp_mapping),
5754                                sizeof(struct eth_fast_path_rx_cqe) *
5755                                NUM_RCQ_BD);
5756
5757                 /* SGE ring */
5758                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5760                                bnx2x_fp(bp, i, rx_sge_mapping),
5761                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5762         }
5763         /* end of fastpath */
5764
5765         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5766                        sizeof(struct host_def_status_block));
5767
5768         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5769                        sizeof(struct bnx2x_slowpath));
5770
5771 #ifdef BCM_ISCSI
5772         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5773         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5774         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5775         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5776 #endif
5777         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5778
5779 #undef BNX2X_PCI_FREE
5780 #undef BNX2X_FREE
5781 }
5782
5783 static int bnx2x_alloc_mem(struct bnx2x *bp)
5784 {
5785
5786 #define BNX2X_PCI_ALLOC(x, y, size) \
5787         do { \
5788                 x = pci_alloc_consistent(bp->pdev, size, y); \
5789                 if (x == NULL) \
5790                         goto alloc_mem_err; \
5791                 memset(x, 0, size); \
5792         } while (0)
5793
5794 #define BNX2X_ALLOC(x, size) \
5795         do { \
5796                 x = vmalloc(size); \
5797                 if (x == NULL) \
5798                         goto alloc_mem_err; \
5799                 memset(x, 0, size); \
5800         } while (0)
5801
5802         int i;
5803
5804         /* fastpath */
5805         for_each_queue(bp, i) {
5806                 bnx2x_fp(bp, i, bp) = bp;
5807
5808                 /* Status blocks */
5809                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5810                                 &bnx2x_fp(bp, i, status_blk_mapping),
5811                                 sizeof(struct host_status_block) +
5812                                 sizeof(struct eth_tx_db_data));
5813
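                /* the Tx doorbell data lives right after the status block
                 * in the same DMA allocation, hence the combined size above
                 */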
5814                 bnx2x_fp(bp, i, hw_tx_prods) =
5815                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5816
5817                 bnx2x_fp(bp, i, tx_prods_mapping) =
5818                                 bnx2x_fp(bp, i, status_blk_mapping) +
5819                                 sizeof(struct host_status_block);
5820
5821                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5822                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5823                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5824                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5825                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5826                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5827
5828                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5829                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5830                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5831                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5832                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5833
5834                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5835                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5836                                 sizeof(struct eth_fast_path_rx_cqe) *
5837                                 NUM_RCQ_BD);
5838
5839                 /* SGE ring */
5840                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5841                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5842                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5843                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5844                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5845         }
5846         /* end of fastpath */
5847
5848         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5849                         sizeof(struct host_def_status_block));
5850
5851         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5852                         sizeof(struct bnx2x_slowpath));
5853
5854 #ifdef BCM_ISCSI
5855         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5856
5857         /* Initialize T1 */
5858         for (i = 0; i < 64*1024; i += 64) {
5859                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5860                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5861         }
5862
5863         /* allocate searcher T2 table;
5864            we allocate 1/4 of alloc num for T2
5865            (which is not entered into the ILT) */
5866         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5867
5868         /* Initialize T2 */
5869         for (i = 0; i < 16*1024; i += 64)
5870                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871
5872         /* now fix up the last line in the block to point to the next block */
5873         *(u64 *)((char *)bp->t2 + 16*1024 - 8) = bp->t2_mapping;
5874
5875         /* Timer block array (MAX_CONN*8), physically uncached; 1024 conns for now */
5876         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5877
5878         /* QM queues (128*MAX_CONN) */
5879         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5880 #endif
5881
5882         /* Slow path ring */
5883         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5884
5885         return 0;
5886
5887 alloc_mem_err:
5888         bnx2x_free_mem(bp);
5889         return -ENOMEM;
5890
5891 #undef BNX2X_PCI_ALLOC
5892 #undef BNX2X_ALLOC
5893 }
5894
5895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5896 {
5897         int i;
5898
5899         for_each_queue(bp, i) {
5900                 struct bnx2x_fastpath *fp = &bp->fp[i];
5901
5902                 u16 bd_cons = fp->tx_bd_cons;
5903                 u16 sw_prod = fp->tx_pkt_prod;
5904                 u16 sw_cons = fp->tx_pkt_cons;
5905
5906                 while (sw_cons != sw_prod) {
5907                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5908                         sw_cons++;
5909                 }
5910         }
5911 }
5912
5913 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5914 {
5915         int i, j;
5916
5917         for_each_queue(bp, j) {
5918                 struct bnx2x_fastpath *fp = &bp->fp[j];
5919
5920                 for (i = 0; i < NUM_RX_BD; i++) {
5921                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5922                         struct sk_buff *skb = rx_buf->skb;
5923
5924                         if (skb == NULL)
5925                                 continue;
5926
5927                         pci_unmap_single(bp->pdev,
5928                                          pci_unmap_addr(rx_buf, mapping),
5929                                          bp->rx_buf_use_size,
5930                                          PCI_DMA_FROMDEVICE);
5931
5932                         rx_buf->skb = NULL;
5933                         dev_kfree_skb(skb);
5934                 }
5935                 if (!fp->disable_tpa)
5936                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5937                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5938                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5939         }
5940 }
5941
5942 static void bnx2x_free_skbs(struct bnx2x *bp)
5943 {
5944         bnx2x_free_tx_skbs(bp);
5945         bnx2x_free_rx_skbs(bp);
5946 }
5947
5948 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5949 {
5950         int i, offset = 1;
5951
5952         free_irq(bp->msix_table[0].vector, bp->dev);
5953         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954            bp->msix_table[0].vector);
5955
5956         for_each_queue(bp, i) {
5957                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5958                    "state %x\n", i, bp->msix_table[i + offset].vector,
5959                    bnx2x_fp(bp, i, state));
5960
5961                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5962                         BNX2X_ERR("IRQ of fp #%d being freed while "
5963                                   "state != closed\n", i);
5964
5965                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5966         }
5967 }
5968
5969 static void bnx2x_free_irq(struct bnx2x *bp)
5970 {
5971         if (bp->flags & USING_MSIX_FLAG) {
5972                 bnx2x_free_msix_irqs(bp);
5973                 pci_disable_msix(bp->pdev);
5974                 bp->flags &= ~USING_MSIX_FLAG;
5975
5976         } else
5977                 free_irq(bp->pdev->irq, bp->dev);
5978 }
5979
5980 static int bnx2x_enable_msix(struct bnx2x *bp)
5981 {
5982         int i, rc, offset;
5983
5984         bp->msix_table[0].entry = 0;
5985         offset = 1;
5986         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5987
5988         for_each_queue(bp, i) {
5989                 int igu_vec = offset + i + BP_L_ID(bp);
5990
5991                 bp->msix_table[i + offset].entry = igu_vec;
5992                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5993                    "(fastpath #%u)\n", i + offset, igu_vec, i);
5994         }
5995
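        /* pci_enable_msix() returns 0 on success; any other value,
         * including a positive "fewer vectors available" hint, is
         * treated as failure here
         */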
5996         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997                              bp->num_queues + offset);
5998         if (rc) {
5999                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6000                 return -1;
6001         }
6002         bp->flags |= USING_MSIX_FLAG;
6003
6004         return 0;
6005 }
6006
6007 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6008 {
6009         int i, rc, offset = 1;
6010
6011         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6012                          bp->dev->name, bp->dev);
6013         if (rc) {
6014                 BNX2X_ERR("request sp irq failed\n");
6015                 return -EBUSY;
6016         }
6017
6018         for_each_queue(bp, i) {
6019                 rc = request_irq(bp->msix_table[i + offset].vector,
6020                                  bnx2x_msix_fp_int, 0,
6021                                  bp->dev->name, &bp->fp[i]);
6022                 if (rc) {
6023                         BNX2X_ERR("request fp #%d irq failed  rc %d\n",
6024                                   i + offset, rc);
6025                         bnx2x_free_msix_irqs(bp);
6026                         return -EBUSY;
6027                 }
6028
6029                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6030         }
6031
6032         return 0;
6033 }
6034
6035 static int bnx2x_req_irq(struct bnx2x *bp)
6036 {
6037         int rc;
6038
6039         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6040                          bp->dev->name, bp->dev);
6041         if (!rc)
6042                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6043
6044         return rc;
6045 }
6046
6047 /*
6048  * Init service functions
6049  */
6050
6051 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6052 {
6053         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6054         int port = BP_PORT(bp);
6055
6056         /* CAM allocation
6057          * unicasts 0-31:port0 32-63:port1
6058          * multicast 64-127:port0 128-191:port1
6059          */
6060         config->hdr.length_6b = 2;
6061         config->hdr.offset = port ? 31 : 0;
6062         config->hdr.client_id = BP_CL_ID(bp);
6063         config->hdr.reserved1 = 0;
6064
6065         /* primary MAC */
6066         config->config_table[0].cam_entry.msb_mac_addr =
6067                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6068         config->config_table[0].cam_entry.middle_mac_addr =
6069                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6070         config->config_table[0].cam_entry.lsb_mac_addr =
6071                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6072         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6073         config->config_table[0].target_table_entry.flags = 0;
6074         config->config_table[0].target_table_entry.client_id = 0;
6075         config->config_table[0].target_table_entry.vlan_id = 0;
6076
6077         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6078            config->config_table[0].cam_entry.msb_mac_addr,
6079            config->config_table[0].cam_entry.middle_mac_addr,
6080            config->config_table[0].cam_entry.lsb_mac_addr);
6081
6082         /* broadcast */
6083         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6084         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6085         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6086         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6087         config->config_table[1].target_table_entry.flags =
6088                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6089         config->config_table[1].target_table_entry.client_id = 0;
6090         config->config_table[1].target_table_entry.vlan_id = 0;
6091
6092         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6093                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6094                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6095 }
6096
6097 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6098 {
6099         struct mac_configuration_cmd_e1h *config =
6100                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6101
6102         if (bp->state != BNX2X_STATE_OPEN) {
6103                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6104                 return;
6105         }
6106
6107         /* CAM allocation for E1H
6108          * unicasts: by func number
6109          * multicast: 20+FUNC*20, 20 each
6110          */
6111         config->hdr.length_6b = 1;
6112         config->hdr.offset = BP_FUNC(bp);
6113         config->hdr.client_id = BP_CL_ID(bp);
6114         config->hdr.reserved1 = 0;
6115
6116         /* primary MAC */
6117         config->config_table[0].msb_mac_addr =
6118                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6119         config->config_table[0].middle_mac_addr =
6120                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6121         config->config_table[0].lsb_mac_addr =
6122                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6123         config->config_table[0].client_id = BP_L_ID(bp);
6124         config->config_table[0].vlan_id = 0;
6125         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6126         config->config_table[0].flags = BP_PORT(bp);
6127
6128         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6129            config->config_table[0].msb_mac_addr,
6130            config->config_table[0].middle_mac_addr,
6131            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6132
6133         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6134                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6135                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6136 }
6137
6138 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6139                              int *state_p, int poll)
6140 {
6141         /* can take a while if any port is running */
6142         int cnt = 500;
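        /* 500 iterations of msleep(1) below, so roughly half a second
         * at minimum before giving up
         */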
6143
6144         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6145            poll ? "polling" : "waiting", state, idx);
6146
6147         might_sleep();
6148         while (cnt--) {
6149                 if (poll) {
6150                         bnx2x_rx_int(bp->fp, 10);
6151                         /* if index is different from 0
6152                          * the reply for some commands will
6153                          * be on the non-default queue
6154                          */
6155                         if (idx)
6156                                 bnx2x_rx_int(&bp->fp[idx], 10);
6157                 }
6158                 mb(); /* state is changed by bnx2x_sp_event() */
6159
6160                 if (*state_p == state)
6161                         return 0;
6162
6163                 msleep(1);
6164         }
6165
6166         /* timeout! */
6167         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6168                   poll ? "polling" : "waiting", state, idx);
6169 #ifdef BNX2X_STOP_ON_ERROR
6170         bnx2x_panic();
6171 #endif
6172
6173         return -EBUSY;
6174 }
6175
6176 static int bnx2x_setup_leading(struct bnx2x *bp)
6177 {
6178         int rc;
6179
6180         /* reset IGU state */
6181         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6182
6183         /* SETUP ramrod */
6184         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6185
6186         /* Wait for completion */
6187         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6188
6189         return rc;
6190 }
6191
6192 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6193 {
6194         /* reset IGU state */
6195         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6196
6197         /* SETUP ramrod */
6198         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6199         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6200
6201         /* Wait for completion */
6202         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6203                                  &(bp->fp[index].state), 0);
6204 }
6205
6206 static int bnx2x_poll(struct napi_struct *napi, int budget);
6207 static void bnx2x_set_rx_mode(struct net_device *dev);
6208
6209 /* must be called with rtnl_lock */
6210 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6211 {
6212         u32 load_code;
6213         int i, rc;
6214
6215 #ifdef BNX2X_STOP_ON_ERROR
6216         if (unlikely(bp->panic))
6217                 return -EPERM;
6218 #endif
6219
6220         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6221
6222         /* Send LOAD_REQUEST command to MCP.
6223            Returns the type of LOAD command:
6224            if it is the first port to be initialized,
6225            common blocks should be initialized, otherwise not
6226         */
6227         if (!BP_NOMCP(bp)) {
6228                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6229                 if (!load_code) {
6230                         BNX2X_ERR("MCP response failure, aborting\n");
6231                         return -EBUSY;
6232                 }
6233                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6234                         return -EBUSY; /* other port in diagnostic mode */
6235
6236         } else {
6237                 int port = BP_PORT(bp);
6238
6239                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6240                    load_count[0], load_count[1], load_count[2]);
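                /* no MCP to arbitrate, so emulate it: slot 0 counts every
                 * load, slot 1 + port counts loads per port
                 */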
6241                 load_count[0]++;
6242                 load_count[1 + port]++;
6243                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6244                    load_count[0], load_count[1], load_count[2]);
6245                 if (load_count[0] == 1)
6246                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6247                 else if (load_count[1 + port] == 1)
6248                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6249                 else
6250                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6251         }
6252
6253         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6254             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6255                 bp->port.pmf = 1;
6256         else
6257                 bp->port.pmf = 0;
6258         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6259
6260         /* if we can't use MSI-X we only need one fp,
6261          * so try to enable MSI-X with the requested number of fp's
6262          * and fall back to INT#A with one fp
6263          */
6264         if (use_inta) {
6265                 bp->num_queues = 1;
6266
6267         } else {
6268                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6269                         /* user requested number */
6270                         bp->num_queues = use_multi;
6271
6272                 else if (use_multi)
6273                         bp->num_queues = min_t(u32, num_online_cpus(),
6274                                                BP_MAX_QUEUES(bp));
6275                 else
6276                         bp->num_queues = 1;
6277
6278                 if (bnx2x_enable_msix(bp)) {
6279                         /* failed to enable MSI-X */
6280                         bp->num_queues = 1;
6281                         if (use_multi)
6282                                 BNX2X_ERR("Multi requested but failed"
6283                                           " to enable MSI-X\n");
6284                 }
6285         }
6286         DP(NETIF_MSG_IFUP,
6287            "set number of queues to %d\n", bp->num_queues);
6288
6289         if (bnx2x_alloc_mem(bp))
6290                 return -ENOMEM;
6291
6292         for_each_queue(bp, i)
6293                 bnx2x_fp(bp, i, disable_tpa) =
6294                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6295
6296         if (bp->flags & USING_MSIX_FLAG) {
6297                 rc = bnx2x_req_msix_irqs(bp);
6298                 if (rc) {
6299                         pci_disable_msix(bp->pdev);
6300                         goto load_error;
6301                 }
6302         } else {
6303                 bnx2x_ack_int(bp);
6304                 rc = bnx2x_req_irq(bp);
6305                 if (rc) {
6306                         BNX2X_ERR("IRQ request failed, aborting\n");
6307                         goto load_error;
6308                 }
6309         }
6310
6311         for_each_queue(bp, i)
6312                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6313                                bnx2x_poll, 128);
6314
6315         /* Initialize HW */
6316         rc = bnx2x_init_hw(bp, load_code);
6317         if (rc) {
6318                 BNX2X_ERR("HW init failed, aborting\n");
6319                 goto load_error;
6320         }
6321
6322         /* Setup NIC internals and enable interrupts */
6323         bnx2x_nic_init(bp, load_code);
6324
6325         /* Send LOAD_DONE command to MCP */
6326         if (!BP_NOMCP(bp)) {
6327                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6328                 if (!load_code) {
6329                         BNX2X_ERR("MCP response failure, aborting\n");
6330                         rc = -EBUSY;
6331                         goto load_int_disable;
6332                 }
6333         }
6334
6335         bnx2x_stats_init(bp);
6336
6337         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6338
6339         /* Enable Rx interrupt handling before sending the ramrod
6340            as it's completed on Rx FP queue */
6341         for_each_queue(bp, i)
6342                 napi_enable(&bnx2x_fp(bp, i, napi));
6343
6344         /* Enable interrupt handling */
6345         atomic_set(&bp->intr_sem, 0);
6346
6347         rc = bnx2x_setup_leading(bp);
6348         if (rc) {
6349                 BNX2X_ERR("Setup leading failed!\n");
6350                 goto load_stop_netif;
6351         }
6352
6353         if (CHIP_IS_E1H(bp))
6354                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6355                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6356                         bp->state = BNX2X_STATE_DISABLED;
6357                 }
6358
6359         if (bp->state == BNX2X_STATE_OPEN)
6360                 for_each_nondefault_queue(bp, i) {
6361                         rc = bnx2x_setup_multi(bp, i);
6362                         if (rc)
6363                                 goto load_stop_netif;
6364                 }
6365
6366         if (CHIP_IS_E1(bp))
6367                 bnx2x_set_mac_addr_e1(bp);
6368         else
6369                 bnx2x_set_mac_addr_e1h(bp);
6370
6371         if (bp->port.pmf)
6372                 bnx2x_initial_phy_init(bp);
6373
6374         /* Start fast path */
6375         switch (load_mode) {
6376         case LOAD_NORMAL:
6377                 /* Tx queue should only be re-enabled */
6378                 netif_wake_queue(bp->dev);
6379                 bnx2x_set_rx_mode(bp->dev);
6380                 break;
6381
6382         case LOAD_OPEN:
6383                 netif_start_queue(bp->dev);
6384                 bnx2x_set_rx_mode(bp->dev);
6385                 if (bp->flags & USING_MSIX_FLAG)
6386                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6387                                bp->dev->name);
6388                 break;
6389
6390         case LOAD_DIAG:
6391                 bnx2x_set_rx_mode(bp->dev);
6392                 bp->state = BNX2X_STATE_DIAG;
6393                 break;
6394
6395         default:
6396                 break;
6397         }
6398
6399         if (!bp->port.pmf)
6400                 bnx2x__link_status_update(bp);
6401
6402         /* start the timer */
6403         mod_timer(&bp->timer, jiffies + bp->current_interval);
6404
6405
6406         return 0;
6407
6408 load_stop_netif:
6409         for_each_queue(bp, i)
6410                 napi_disable(&bnx2x_fp(bp, i, napi));
6411
6412 load_int_disable:
6413         bnx2x_int_disable_sync(bp);
6414
6415         /* Release IRQs */
6416         bnx2x_free_irq(bp);
6417
6418         /* Free SKBs, SGEs, TPA pool and driver internals */
6419         bnx2x_free_skbs(bp);
6420         for_each_queue(bp, i)
6421                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6422                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6423 load_error:
6424         bnx2x_free_mem(bp);
6425
6426         /* TBD we really need to reset the chip
6427            if we want to recover from this */
6428         return rc;
6429 }
6430
6431 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6432 {
6433         int rc;
6434
6435         /* halt the connection */
6436         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6437         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6438
6439         /* Wait for completion */
6440         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6441                                &(bp->fp[index].state), 1);
6442         if (rc) /* timeout */
6443                 return rc;
6444
6445         /* delete cfc entry */
6446         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6447
6448         /* Wait for completion */
6449         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6450                                &(bp->fp[index].state), 1);
6451         return rc;
6452 }
6453
6454 static int bnx2x_stop_leading(struct bnx2x *bp)
6455 {
6456         u16 dsb_sp_prod_idx;
6457         /* if the other port is handling traffic,
6458            this can take a lot of time */
6459         int cnt = 500;
6460         int rc;
6461
6462         might_sleep();
6463
6464         /* Send HALT ramrod */
6465         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6466         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6467
6468         /* Wait for completion */
6469         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6470                                &(bp->fp[0].state), 1);
6471         if (rc) /* timeout */
6472                 return rc;
6473
6474         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6475
6476         /* Send PORT_DELETE ramrod */
6477         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6478
6479         /* Wait for completion to arrive on the default status block;
6480            we are going to reset the chip anyway,
6481            so there is not much to do if this times out
6482          */
6483         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6484                 if (!cnt) {
6485                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6486                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6487                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6488 #ifdef BNX2X_STOP_ON_ERROR
6489                         bnx2x_panic();
6490 #else
6491                         rc = -EBUSY;
6492 #endif
6493                         break;
6494                 }
6495                 cnt--;
6496                 msleep(1);
6497         }
6498         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6499         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6500
6501         return rc;
6502 }
6503
6504 static void bnx2x_reset_func(struct bnx2x *bp)
6505 {
6506         int port = BP_PORT(bp);
6507         int func = BP_FUNC(bp);
6508         int base, i;
6509
6510         /* Configure IGU */
6511         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6512         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6513
6514         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6515
6516         /* Clear ILT */
6517         base = FUNC_ILT_BASE(func);
6518         for (i = base; i < base + ILT_PER_FUNC; i++)
6519                 bnx2x_ilt_wr(bp, i, 0);
6520 }
6521
6522 static void bnx2x_reset_port(struct bnx2x *bp)
6523 {
6524         int port = BP_PORT(bp);
6525         u32 val;
6526
6527         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6528
6529         /* Do not rcv packets to BRB */
6530         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6531         /* Do not direct rcv packets that are not for MCP to the BRB */
6532         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6533                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6534
6535         /* Configure AEU */
6536         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6537
6538         msleep(100);
6539         /* Check for BRB port occupancy */
6540         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6541         if (val)
6542                 DP(NETIF_MSG_IFDOWN,
6543                    "BRB1 is not empty  %d blocks are occupied\n", val);
6544
6545         /* TODO: Close Doorbell port? */
6546 }
6547
6548 static void bnx2x_reset_common(struct bnx2x *bp)
6549 {
6550         /* reset_common */
6551         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6552                0xd3ffff7f);
6553         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6554 }
6555
6556 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6557 {
6558         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6559            BP_FUNC(bp), reset_code);
6560
6561         switch (reset_code) {
6562         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6563                 bnx2x_reset_port(bp);
6564                 bnx2x_reset_func(bp);
6565                 bnx2x_reset_common(bp);
6566                 break;
6567
6568         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6569                 bnx2x_reset_port(bp);
6570                 bnx2x_reset_func(bp);
6571                 break;
6572
6573         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6574                 bnx2x_reset_func(bp);
6575                 break;
6576
6577         default:
6578                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6579                 break;
6580         }
6581 }
6582
6583 /* must be called with rtnl_lock */
6584 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6585 {
6586         int port = BP_PORT(bp);
6587         u32 reset_code = 0;
6588         int i, cnt, rc;
6589
6590         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6591
6592         bp->rx_mode = BNX2X_RX_MODE_NONE;
6593         bnx2x_set_storm_rx_mode(bp);
6594
6595         if (netif_running(bp->dev)) {
6596                 netif_tx_disable(bp->dev);
6597                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6598         }
6599
6600         del_timer_sync(&bp->timer);
6601         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6602                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6603         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6604
6605         /* Wait until tx fast path tasks complete */
6606         for_each_queue(bp, i) {
6607                 struct bnx2x_fastpath *fp = &bp->fp[i];
6608
6609                 cnt = 1000;
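                /* allow up to ~1s (1000 * 1ms) for the Tx fastpath to drain */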
6610                 smp_rmb();
6611                 while (BNX2X_HAS_TX_WORK(fp)) {
6612
6613                         if (!netif_running(bp->dev))
6614                                 bnx2x_tx_int(fp, 1000);
6615
6616                         if (!cnt) {
6617                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6618                                           i);
6619 #ifdef BNX2X_STOP_ON_ERROR
6620                                 bnx2x_panic();
6621                                 return -EBUSY;
6622 #else
6623                                 break;
6624 #endif
6625                         }
6626                         cnt--;
6627                         msleep(1);
6628                         smp_rmb();
6629                 }
6630         }
6631
6632         /* Give HW time to discard old tx messages */
6633         msleep(1);
6634
6635         for_each_queue(bp, i)
6636                 napi_disable(&bnx2x_fp(bp, i, napi));
6637         /* Disable interrupts after Tx and Rx are disabled on stack level */
6638         bnx2x_int_disable_sync(bp);
6639
6640         /* Release IRQs */
6641         bnx2x_free_irq(bp);
6642
6643         if (unload_mode == UNLOAD_NORMAL)
6644                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6645
6646         else if (bp->flags & NO_WOL_FLAG) {
6647                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6648                 if (CHIP_IS_E1H(bp))
6649                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6650
6651         } else if (bp->wol) {
6652                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6653                 u8 *mac_addr = bp->dev->dev_addr;
6654                 u32 val;
6655                 /* The mac address is written to entries 1-4 to
6656                    preserve entry 0 which is used by the PMF */
6657                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6658
6659                 val = (mac_addr[0] << 8) | mac_addr[1];
6660                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6661
6662                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6663                       (mac_addr[4] << 8) | mac_addr[5];
6664                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6665
6666                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6667
6668         } else
6669                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6670
6671         if (CHIP_IS_E1H(bp))
6672                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6673
6674         /* Close multi and leading connections;
6675            Completions for ramrods are collected in a synchronous way */
6676         for_each_nondefault_queue(bp, i)
6677                 if (bnx2x_stop_multi(bp, i))
6678                         goto unload_error;
6679
6680         rc = bnx2x_stop_leading(bp);
6681         if (rc) {
6682                 BNX2X_ERR("Stop leading failed!\n");
6683 #ifdef BNX2X_STOP_ON_ERROR
6684                 return -EBUSY;
6685 #else
6686                 goto unload_error;
6687 #endif
6688         }
6689
6690 unload_error:
6691         if (!BP_NOMCP(bp))
6692                 reset_code = bnx2x_fw_command(bp, reset_code);
6693         else {
6694                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6695                    load_count[0], load_count[1], load_count[2]);
6696                 load_count[0]--;
6697                 load_count[1 + port]--;
6698                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6699                    load_count[0], load_count[1], load_count[2]);
6700                 if (load_count[0] == 0)
6701                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6702                 else if (load_count[1 + port] == 0)
6703                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6704                 else
6705                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6706         }
6707
6708         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6709             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6710                 bnx2x__link_reset(bp);
6711
6712         /* Reset the chip */
6713         bnx2x_reset_chip(bp, reset_code);
6714
6715         /* Report UNLOAD_DONE to MCP */
6716         if (!BP_NOMCP(bp))
6717                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6718
6719         /* Free SKBs, SGEs, TPA pool and driver internals */
6720         bnx2x_free_skbs(bp);
6721         for_each_queue(bp, i)
6722                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6723                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6724         bnx2x_free_mem(bp);
6725
6726         bp->state = BNX2X_STATE_CLOSED;
6727
6728         netif_carrier_off(bp->dev);
6729
6730         return 0;
6731 }
6732
6733 static void bnx2x_reset_task(struct work_struct *work)
6734 {
6735         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6736
6737 #ifdef BNX2X_STOP_ON_ERROR
6738         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6739                   " so reset is not done to allow debug dump,\n"
6740          KERN_ERR " you will need to reboot when done\n");
6741         return;
6742 #endif
6743
6744         rtnl_lock();
6745
6746         if (!netif_running(bp->dev))
6747                 goto reset_task_exit;
6748
6749         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6750         bnx2x_nic_load(bp, LOAD_NORMAL);
6751
6752 reset_task_exit:
6753         rtnl_unlock();
6754 }
6755
6756 /* end of nic load/unload */
6757
6758 /* ethtool_ops */
6759
6760 /*
6761  * Init service functions
6762  */
6763
6764 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6765 {
6766         u32 val;
6767
6768         /* Check if there is any driver already loaded */
6769         val = REG_RD(bp, MISC_REG_UNPREPARED);
6770         if (val == 0x1) {
6771                 /* Check if it is the UNDI driver
6772                  * UNDI driver initializes CID offset for the normal doorbell to 0x7
6773                  */
6774                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6775                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6776                 if (val == 0x7) {
6777                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6778                         /* save our func */
6779                         int func = BP_FUNC(bp);
6780                         u32 swap_en;
6781                         u32 swap_val;
6782
6783                         BNX2X_DEV_INFO("UNDI is active! resetting device\n");
6784
6785                         /* try to unload UNDI on port 0 */
6786                         bp->func = 0;
6787                         bp->fw_seq =
6788                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6789                                 DRV_MSG_SEQ_NUMBER_MASK);
6790                         reset_code = bnx2x_fw_command(bp, reset_code);
6791
6792                         /* if UNDI is loaded on the other port */
6793                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6794
6795                                 /* send "DONE" for previous unload */
6796                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6797
6798                                 /* unload UNDI on port 1 */
6799                                 bp->func = 1;
6800                                 bp->fw_seq =
6801                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6802                                         DRV_MSG_SEQ_NUMBER_MASK);
6803                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6804
6805                                 bnx2x_fw_command(bp, reset_code);
6806                         }
6807
6808                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6809                                     HC_REG_CONFIG_0), 0x1000);
6810
6811                         /* close input traffic and wait for it */
6812                         /* Do not rcv packets to BRB */
6813                         REG_WR(bp,
6814                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6815                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6816                         /* Do not direct rcv packets that are not for MCP to
6817                          * the BRB */
6818                         REG_WR(bp,
6819                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6820                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6821                         /* clear AEU */
6822                         REG_WR(bp,
6823                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6824                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6825                         msleep(10);
6826
6827                         /* save NIG port swap info */
6828                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6829                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6830                         /* reset device */
6831                         REG_WR(bp,
6832                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6833                                0xd3ffffff);
6834                         REG_WR(bp,
6835                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6836                                0x1403);
6837                         /* take the NIG out of reset and restore swap values */
6838                         REG_WR(bp,
6839                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6840                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6841                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6842                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6843
6844                         /* send unload done to the MCP */
6845                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6846
6847                         /* restore our func and fw_seq */
6848                         bp->func = func;
6849                         bp->fw_seq =
6850                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6851                                 DRV_MSG_SEQ_NUMBER_MASK);
6852                 }
6853                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6854         }
6855 }
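
/*
 * Illustration of the detection heuristic above: a pre-boot (UNDI)
 * driver leaves MISC_REG_UNPREPARED at 0x1 and programs
 * DORQ_REG_NORM_CID_OFST to 0x7; any other doorbell CID offset means
 * no UNDI cleanup is attempted.
 */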
6856
6857 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6858 {
6859         u32 val, val2, val3, val4, id;
6860
6861         /* Get the chip revision id and number. */
6862         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6863         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6864         id = ((val & 0xffff) << 16);
6865         val = REG_RD(bp, MISC_REG_CHIP_REV);
6866         id |= ((val & 0xf) << 12);
6867         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6868         id |= ((val & 0xff) << 4);
6869         val = REG_RD(bp, MISC_REG_BOND_ID);
6870         id |= (val & 0xf);
6871         bp->common.chip_id = id;
6872         bp->link_params.chip_id = bp->common.chip_id;
6873         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6874
6875         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6876         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6877                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6878         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6879                        bp->common.flash_size, bp->common.flash_size);
6880
6881         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6882         bp->link_params.shmem_base = bp->common.shmem_base;
6883         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6884
6885         if (!bp->common.shmem_base ||
6886             (bp->common.shmem_base < 0xA0000) ||
6887             (bp->common.shmem_base >= 0xC0000)) {
6888                 BNX2X_DEV_INFO("MCP not active\n");
6889                 bp->flags |= NO_MCP_FLAG;
6890                 return;
6891         }
6892
6893         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6894         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6895                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6896                 BNX2X_ERR("BAD MCP validity signature\n");
6897
6898         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6899         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6900
6901         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6902                        bp->common.hw_config, bp->common.board);
6903
6904         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6905                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6906                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6907
6908         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6909         bp->common.bc_ver = val;
6910         BNX2X_DEV_INFO("bc_ver %X\n", val);
6911         if (val < BNX2X_BC_VER) {
6912                 /* for now only warn;
6913                  * later we might need to enforce this */
6914                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6915                           " please upgrade BC\n", BNX2X_BC_VER, val);
6916         }
6917         BNX2X_DEV_INFO("%sWoL Capable\n",
6918                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6919
6920         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6921         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6922         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6923         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6924
6925         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6926                val, val2, val3, val4);
6927 }
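
/*
 * Example of the chip_id assembly above (hypothetical register
 * values): chip num 0x164e, rev 0x0, metal 0x00 and bond_id 0x0 yield
 * id = (0x164e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0 = 0x164e0000.
 */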
6928
6929 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6930                                                     u32 switch_cfg)
6931 {
6932         int port = BP_PORT(bp);
6933         u32 ext_phy_type;
6934
6935         switch (switch_cfg) {
6936         case SWITCH_CFG_1G:
6937                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6938
6939                 ext_phy_type =
6940                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6941                 switch (ext_phy_type) {
6942                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6943                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6944                                        ext_phy_type);
6945
6946                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6947                                                SUPPORTED_10baseT_Full |
6948                                                SUPPORTED_100baseT_Half |
6949                                                SUPPORTED_100baseT_Full |
6950                                                SUPPORTED_1000baseT_Full |
6951                                                SUPPORTED_2500baseX_Full |
6952                                                SUPPORTED_TP |
6953                                                SUPPORTED_FIBRE |
6954                                                SUPPORTED_Autoneg |
6955                                                SUPPORTED_Pause |
6956                                                SUPPORTED_Asym_Pause);
6957                         break;
6958
6959                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6960                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6961                                        ext_phy_type);
6962
6963                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6964                                                SUPPORTED_10baseT_Full |
6965                                                SUPPORTED_100baseT_Half |
6966                                                SUPPORTED_100baseT_Full |
6967                                                SUPPORTED_1000baseT_Full |
6968                                                SUPPORTED_TP |
6969                                                SUPPORTED_FIBRE |
6970                                                SUPPORTED_Autoneg |
6971                                                SUPPORTED_Pause |
6972                                                SUPPORTED_Asym_Pause);
6973                         break;
6974
6975                 default:
6976                         BNX2X_ERR("NVRAM config error. "
6977                                   "BAD SerDes ext_phy_config 0x%x\n",
6978                                   bp->link_params.ext_phy_config);
6979                         return;
6980                 }
6981
6982                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6983                                            port*0x10);
6984                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6985                 break;
6986
6987         case SWITCH_CFG_10G:
6988                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6989
6990                 ext_phy_type =
6991                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6992                 switch (ext_phy_type) {
6993                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6994                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6995                                        ext_phy_type);
6996
6997                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6998                                                SUPPORTED_10baseT_Full |
6999                                                SUPPORTED_100baseT_Half |
7000                                                SUPPORTED_100baseT_Full |
7001                                                SUPPORTED_1000baseT_Full |
7002                                                SUPPORTED_2500baseX_Full |
7003                                                SUPPORTED_10000baseT_Full |
7004                                                SUPPORTED_TP |
7005                                                SUPPORTED_FIBRE |
7006                                                SUPPORTED_Autoneg |
7007                                                SUPPORTED_Pause |
7008                                                SUPPORTED_Asym_Pause);
7009                         break;
7010
7011                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7012                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7013                                        ext_phy_type);
7014
7015                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7016                                                SUPPORTED_FIBRE |
7017                                                SUPPORTED_Pause |
7018                                                SUPPORTED_Asym_Pause);
7019                         break;
7020
7021                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7022                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7023                                        ext_phy_type);
7024
7025                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7026                                                SUPPORTED_1000baseT_Full |
7027                                                SUPPORTED_FIBRE |
7028                                                SUPPORTED_Pause |
7029                                                SUPPORTED_Asym_Pause);
7030                         break;
7031
7032                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7033                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7034                                        ext_phy_type);
7035
7036                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7037                                                SUPPORTED_1000baseT_Full |
7038                                                SUPPORTED_FIBRE |
7039                                                SUPPORTED_Autoneg |
7040                                                SUPPORTED_Pause |
7041                                                SUPPORTED_Asym_Pause);
7042                         break;
7043
7044                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7045                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7046                                        ext_phy_type);
7047
7048                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7049                                                SUPPORTED_2500baseX_Full |
7050                                                SUPPORTED_1000baseT_Full |
7051                                                SUPPORTED_FIBRE |
7052                                                SUPPORTED_Autoneg |
7053                                                SUPPORTED_Pause |
7054                                                SUPPORTED_Asym_Pause);
7055                         break;
7056
7057                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7058                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7059                                        ext_phy_type);
7060
7061                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7062                                                SUPPORTED_TP |
7063                                                SUPPORTED_Autoneg |
7064                                                SUPPORTED_Pause |
7065                                                SUPPORTED_Asym_Pause);
7066                         break;
7067
7068                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7069                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7070                                   bp->link_params.ext_phy_config);
7071                         break;
7072
7073                 default:
7074                         BNX2X_ERR("NVRAM config error. "
7075                                   "BAD XGXS ext_phy_config 0x%x\n",
7076                                   bp->link_params.ext_phy_config);
7077                         return;
7078                 }
7079
7080                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7081                                            port*0x18);
7082                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7083
7084                 break;
7085
7086         default:
7087                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7088                           bp->port.link_config);
7089                 return;
7090         }
7091         bp->link_params.phy_addr = bp->port.phy_addr;
7092
7093         /* mask what we support according to speed_cap_mask */
7094         if (!(bp->link_params.speed_cap_mask &
7095                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7096                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7097
7098         if (!(bp->link_params.speed_cap_mask &
7099                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7100                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7101
7102         if (!(bp->link_params.speed_cap_mask &
7103                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7104                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7105
7106         if (!(bp->link_params.speed_cap_mask &
7107                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7108                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7109
7110         if (!(bp->link_params.speed_cap_mask &
7111                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7112                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7113                                         SUPPORTED_1000baseT_Full);
7114
7115         if (!(bp->link_params.speed_cap_mask &
7116                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7117                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7118
7119         if (!(bp->link_params.speed_cap_mask &
7120                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7121                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7122
7123         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7124 }
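
/*
 * Illustration of the masking above: if NVRAM clears
 * PORT_HW_CFG_SPEED_CAPABILITY_D0_1G in speed_cap_mask, both
 * SUPPORTED_1000baseT_Half and SUPPORTED_1000baseT_Full are removed
 * from bp->port.supported even if the PHY itself reported them.
 */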
7125
7126 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7127 {
7128         bp->link_params.req_duplex = DUPLEX_FULL;
7129
7130         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7131         case PORT_FEATURE_LINK_SPEED_AUTO:
7132                 if (bp->port.supported & SUPPORTED_Autoneg) {
7133                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7134                         bp->port.advertising = bp->port.supported;
7135                 } else {
7136                         u32 ext_phy_type =
7137                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7138
7139                         if ((ext_phy_type ==
7140                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7141                             (ext_phy_type ==
7142                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7143                                 /* force 10G, no AN */
7144                                 bp->link_params.req_line_speed = SPEED_10000;
7145                                 bp->port.advertising =
7146                                                 (ADVERTISED_10000baseT_Full |
7147                                                  ADVERTISED_FIBRE);
7148                                 break;
7149                         }
7150                         BNX2X_ERR("NVRAM config error. "
7151                                   "Invalid link_config 0x%x"
7152                                   "  Autoneg not supported\n",
7153                                   bp->port.link_config);
7154                         return;
7155                 }
7156                 break;
7157
7158         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7159                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7160                         bp->link_params.req_line_speed = SPEED_10;
7161                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7162                                                 ADVERTISED_TP);
7163                 } else {
7164                         BNX2X_ERR("NVRAM config error. "
7165                                   "Invalid link_config 0x%x"
7166                                   "  speed_cap_mask 0x%x\n",
7167                                   bp->port.link_config,
7168                                   bp->link_params.speed_cap_mask);
7169                         return;
7170                 }
7171                 break;
7172
7173         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7174                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7175                         bp->link_params.req_line_speed = SPEED_10;
7176                         bp->link_params.req_duplex = DUPLEX_HALF;
7177                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7178                                                 ADVERTISED_TP);
7179                 } else {
7180                         BNX2X_ERR("NVRAM config error. "
7181                                   "Invalid link_config 0x%x"
7182                                   "  speed_cap_mask 0x%x\n",
7183                                   bp->port.link_config,
7184                                   bp->link_params.speed_cap_mask);
7185                         return;
7186                 }
7187                 break;
7188
7189         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7190                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7191                         bp->link_params.req_line_speed = SPEED_100;
7192                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7193                                                 ADVERTISED_TP);
7194                 } else {
7195                         BNX2X_ERR("NVRAM config error. "
7196                                   "Invalid link_config 0x%x"
7197                                   "  speed_cap_mask 0x%x\n",
7198                                   bp->port.link_config,
7199                                   bp->link_params.speed_cap_mask);
7200                         return;
7201                 }
7202                 break;
7203
7204         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7205                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7206                         bp->link_params.req_line_speed = SPEED_100;
7207                         bp->link_params.req_duplex = DUPLEX_HALF;
7208                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7209                                                 ADVERTISED_TP);
7210                 } else {
7211                         BNX2X_ERR("NVRAM config error. "
7212                                   "Invalid link_config 0x%x"
7213                                   "  speed_cap_mask 0x%x\n",
7214                                   bp->port.link_config,
7215                                   bp->link_params.speed_cap_mask);
7216                         return;
7217                 }
7218                 break;
7219
7220         case PORT_FEATURE_LINK_SPEED_1G:
7221                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7222                         bp->link_params.req_line_speed = SPEED_1000;
7223                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7224                                                 ADVERTISED_TP);
7225                 } else {
7226                         BNX2X_ERR("NVRAM config error. "
7227                                   "Invalid link_config 0x%x"
7228                                   "  speed_cap_mask 0x%x\n",
7229                                   bp->port.link_config,
7230                                   bp->link_params.speed_cap_mask);
7231                         return;
7232                 }
7233                 break;
7234
7235         case PORT_FEATURE_LINK_SPEED_2_5G:
7236                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7237                         bp->link_params.req_line_speed = SPEED_2500;
7238                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7239                                                 ADVERTISED_TP);
7240                 } else {
7241                         BNX2X_ERR("NVRAM config error. "
7242                                   "Invalid link_config 0x%x"
7243                                   "  speed_cap_mask 0x%x\n",
7244                                   bp->port.link_config,
7245                                   bp->link_params.speed_cap_mask);
7246                         return;
7247                 }
7248                 break;
7249
7250         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7251         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7252         case PORT_FEATURE_LINK_SPEED_10G_KR:
7253                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7254                         bp->link_params.req_line_speed = SPEED_10000;
7255                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7256                                                 ADVERTISED_FIBRE);
7257                 } else {
7258                         BNX2X_ERR("NVRAM config error. "
7259                                   "Invalid link_config 0x%x"
7260                                   "  speed_cap_mask 0x%x\n",
7261                                   bp->port.link_config,
7262                                   bp->link_params.speed_cap_mask);
7263                         return;
7264                 }
7265                 break;
7266
7267         default:
7268                 BNX2X_ERR("NVRAM config error. "
7269                           "BAD link speed link_config 0x%x\n",
7270                           bp->port.link_config);
7271                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7272                 bp->port.advertising = bp->port.supported;
7273                 break;
7274         }
7275
7276         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7277                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7278         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7279             !(bp->port.supported & SUPPORTED_Autoneg))
7280                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7281
7282         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7283                        "  advertising 0x%x\n",
7284                        bp->link_params.req_line_speed,
7285                        bp->link_params.req_duplex,
7286                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7287 }
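
/*
 * Illustration (hypothetical NVRAM value): a link_config selecting
 * PORT_FEATURE_LINK_SPEED_100M_HALF on a port that supports it yields
 * req_line_speed = SPEED_100, req_duplex = DUPLEX_HALF and an
 * advertising mask of ADVERTISED_100baseT_Half | ADVERTISED_TP.
 */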
7288
7289 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7290 {
7291         int port = BP_PORT(bp);
7292         u32 val, val2;
7293
7294         bp->link_params.bp = bp;
7295         bp->link_params.port = port;
7296
7297         bp->link_params.serdes_config =
7298                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7299         bp->link_params.lane_config =
7300                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7301         bp->link_params.ext_phy_config =
7302                 SHMEM_RD(bp,
7303                          dev_info.port_hw_config[port].external_phy_config);
7304         bp->link_params.speed_cap_mask =
7305                 SHMEM_RD(bp,
7306                          dev_info.port_hw_config[port].speed_capability_mask);
7307
7308         bp->port.link_config =
7309                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7310
7311         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7312              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7313                        "  link_config 0x%08x\n",
7314                        bp->link_params.serdes_config,
7315                        bp->link_params.lane_config,
7316                        bp->link_params.ext_phy_config,
7317                        bp->link_params.speed_cap_mask, bp->port.link_config);
7318
7319         bp->link_params.switch_cfg = (bp->port.link_config &
7320                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7321         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7322
7323         bnx2x_link_settings_requested(bp);
7324
7325         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7326         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7327         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7328         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7329         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7330         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7331         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7332         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7333         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7334         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7335 }
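
/*
 * Example of the MAC assembly above (hypothetical shmem values):
 * mac_upper = 0x0010, mac_lower = 0x18236d5f give dev_addr
 * 00:10:18:23:6d:5f - the upper word carries bytes 0-1 and the lower
 * dword bytes 2-5, most significant byte first.
 */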
7336
7337 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7338 {
7339         int func = BP_FUNC(bp);
7340         u32 val, val2;
7341         int rc = 0;
7342
7343         bnx2x_get_common_hwinfo(bp);
7344
7345         bp->e1hov = 0;
7346         bp->e1hmf = 0;
7347         if (CHIP_IS_E1H(bp)) {
7348                 bp->mf_config =
7349                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7350
7351                 val =
7352                    (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7353                     FUNC_MF_CFG_E1HOV_TAG_MASK);
7354                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7355
7356                         bp->e1hov = val;
7357                         bp->e1hmf = 1;
7358                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7359                                        "(0x%04x)\n",
7360                                        func, bp->e1hov, bp->e1hov);
7361                 } else {
7362                         BNX2X_DEV_INFO("Single function mode\n");
7363                         if (BP_E1HVN(bp)) {
7364                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7365                                           "  aborting\n", func);
7366                                 rc = -EPERM;
7367                         }
7368                 }
7369         }
7370
7371         if (!BP_NOMCP(bp)) {
7372                 bnx2x_get_port_hwinfo(bp);
7373
7374                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7375                               DRV_MSG_SEQ_NUMBER_MASK);
7376                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7377         }
7378
7379         if (IS_E1HMF(bp)) {
7380                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7381                 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7382                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7383                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7384                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7385                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7386                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7387                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7388                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7389                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7390                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7391                                ETH_ALEN);
7392                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7393                                ETH_ALEN);
7394                 }
7395
7396                 return rc;
7397         }
7398
7399         if (BP_NOMCP(bp)) {
7400                 /* only supposed to happen on emulation/FPGA */
7401                 BNX2X_ERR("warning: random MAC workaround active\n");
7402                 random_ether_addr(bp->dev->dev_addr);
7403                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7404         }
7405
7406         return rc;
7407 }
7408
7409 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7410 {
7411         int func = BP_FUNC(bp);
7412         int rc;
7413
7414         /* Disable interrupt handling until HW is initialized */
7415         atomic_set(&bp->intr_sem, 1);
7416
7417         mutex_init(&bp->port.phy_mutex);
7418
7419         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7420         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7421
7422         rc = bnx2x_get_hwinfo(bp);
7423
7424         /* need to reset chip if undi was active */
7425         if (!BP_NOMCP(bp))
7426                 bnx2x_undi_unload(bp);
7427
7428         if (CHIP_REV_IS_FPGA(bp))
7429                 printk(KERN_ERR PFX "FPGA detected\n");
7430
7431         if (BP_NOMCP(bp) && (func == 0))
7432                 printk(KERN_ERR PFX
7433                        "MCP disabled, must load devices in order!\n");
7434
7435         /* Set TPA flags */
7436         if (disable_tpa) {
7437                 bp->flags &= ~TPA_ENABLE_FLAG;
7438                 bp->dev->features &= ~NETIF_F_LRO;
7439         } else {
7440                 bp->flags |= TPA_ENABLE_FLAG;
7441                 bp->dev->features |= NETIF_F_LRO;
7442         }
7443
7444
7445         bp->tx_ring_size = MAX_TX_AVAIL;
7446         bp->rx_ring_size = MAX_RX_AVAIL;
7447
7448         bp->rx_csum = 1;
7449         bp->rx_offset = 0;
7450
7451         bp->tx_ticks = 50;
7452         bp->rx_ticks = 25;
7453
7454         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7455         bp->current_interval = (poll ? poll : bp->timer_interval);
7456
7457         init_timer(&bp->timer);
7458         bp->timer.expires = jiffies + bp->current_interval;
7459         bp->timer.data = (unsigned long) bp;
7460         bp->timer.function = bnx2x_timer;
7461
7462         return rc;
7463 }
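
/*
 * Note on the timer setup above: a nonzero "poll" module parameter
 * overrides the default interval (HZ, or 5*HZ on slow emulation
 * chips).  init_timer() only initializes the struct; the timer is
 * armed elsewhere, not here.
 */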
7464
7465 /*
7466  * ethtool service functions
7467  */
7468
7469 /* All ethtool functions called with rtnl_lock */
7470
7471 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7472 {
7473         struct bnx2x *bp = netdev_priv(dev);
7474
7475         cmd->supported = bp->port.supported;
7476         cmd->advertising = bp->port.advertising;
7477
7478         if (netif_carrier_ok(dev)) {
7479                 cmd->speed = bp->link_vars.line_speed;
7480                 cmd->duplex = bp->link_vars.duplex;
7481         } else {
7482                 cmd->speed = bp->link_params.req_line_speed;
7483                 cmd->duplex = bp->link_params.req_duplex;
7484         }
7485         if (IS_E1HMF(bp)) {
7486                 u16 vn_max_rate;
7487
7488                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7489                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7490                 if (vn_max_rate < cmd->speed)
7491                         cmd->speed = vn_max_rate;
7492         }
7493
7494         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7495                 u32 ext_phy_type =
7496                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7497
7498                 switch (ext_phy_type) {
7499                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7500                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7501                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7502                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7503                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7504                         cmd->port = PORT_FIBRE;
7505                         break;
7506
7507                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7508                         cmd->port = PORT_TP;
7509                         break;
7510
7511                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7512                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7513                                   bp->link_params.ext_phy_config);
7514                         break;
7515
7516                 default:
7517                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7518                            bp->link_params.ext_phy_config);
7519                         break;
7520                 }
7521         } else
7522                 cmd->port = PORT_TP;
7523
7524         cmd->phy_address = bp->port.phy_addr;
7525         cmd->transceiver = XCVR_INTERNAL;
7526
7527         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7528                 cmd->autoneg = AUTONEG_ENABLE;
7529         else
7530                 cmd->autoneg = AUTONEG_DISABLE;
7531
7532         cmd->maxtxpkt = 0;
7533         cmd->maxrxpkt = 0;
7534
7535         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7536            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7537            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7538            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7539            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7540            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7541            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7542
7543         return 0;
7544 }
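
/*
 * Illustration of the MF rate cap above: a FUNC_MF_CFG_MAX_BW field of
 * 25 (hypothetical) caps the reported speed at 25 * 100 = 2500 Mbps,
 * so a 10G link is reported to ethtool as 2500.
 */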
7545
7546 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7547 {
7548         struct bnx2x *bp = netdev_priv(dev);
7549         u32 advertising;
7550
7551         if (IS_E1HMF(bp))
7552                 return 0;
7553
7554         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7555            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7556            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7557            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7558            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7559            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7560            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7561
7562         if (cmd->autoneg == AUTONEG_ENABLE) {
7563                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7564                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7565                         return -EINVAL;
7566                 }
7567
7568                 /* advertise the requested speed and duplex if supported */
7569                 cmd->advertising &= bp->port.supported;
7570
7571                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7572                 bp->link_params.req_duplex = DUPLEX_FULL;
7573                 bp->port.advertising |= (ADVERTISED_Autoneg |
7574                                          cmd->advertising);
7575
7576         } else { /* forced speed */
7577                 /* advertise the requested speed and duplex if supported */
7578                 switch (cmd->speed) {
7579                 case SPEED_10:
7580                         if (cmd->duplex == DUPLEX_FULL) {
7581                                 if (!(bp->port.supported &
7582                                       SUPPORTED_10baseT_Full)) {
7583                                         DP(NETIF_MSG_LINK,
7584                                            "10M full not supported\n");
7585                                         return -EINVAL;
7586                                 }
7587
7588                                 advertising = (ADVERTISED_10baseT_Full |
7589                                                ADVERTISED_TP);
7590                         } else {
7591                                 if (!(bp->port.supported &
7592                                       SUPPORTED_10baseT_Half)) {
7593                                         DP(NETIF_MSG_LINK,
7594                                            "10M half not supported\n");
7595                                         return -EINVAL;
7596                                 }
7597
7598                                 advertising = (ADVERTISED_10baseT_Half |
7599                                                ADVERTISED_TP);
7600                         }
7601                         break;
7602
7603                 case SPEED_100:
7604                         if (cmd->duplex == DUPLEX_FULL) {
7605                                 if (!(bp->port.supported &
7606                                                 SUPPORTED_100baseT_Full)) {
7607                                         DP(NETIF_MSG_LINK,
7608                                            "100M full not supported\n");
7609                                         return -EINVAL;
7610                                 }
7611
7612                                 advertising = (ADVERTISED_100baseT_Full |
7613                                                ADVERTISED_TP);
7614                         } else {
7615                                 if (!(bp->port.supported &
7616                                                 SUPPORTED_100baseT_Half)) {
7617                                         DP(NETIF_MSG_LINK,
7618                                            "100M half not supported\n");
7619                                         return -EINVAL;
7620                                 }
7621
7622                                 advertising = (ADVERTISED_100baseT_Half |
7623                                                ADVERTISED_TP);
7624                         }
7625                         break;
7626
7627                 case SPEED_1000:
7628                         if (cmd->duplex != DUPLEX_FULL) {
7629                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7630                                 return -EINVAL;
7631                         }
7632
7633                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7634                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7635                                 return -EINVAL;
7636                         }
7637
7638                         advertising = (ADVERTISED_1000baseT_Full |
7639                                        ADVERTISED_TP);
7640                         break;
7641
7642                 case SPEED_2500:
7643                         if (cmd->duplex != DUPLEX_FULL) {
7644                                 DP(NETIF_MSG_LINK,
7645                                    "2.5G half not supported\n");
7646                                 return -EINVAL;
7647                         }
7648
7649                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7650                                 DP(NETIF_MSG_LINK,
7651                                    "2.5G full not supported\n");
7652                                 return -EINVAL;
7653                         }
7654
7655                         advertising = (ADVERTISED_2500baseX_Full |
7656                                        ADVERTISED_TP);
7657                         break;
7658
7659                 case SPEED_10000:
7660                         if (cmd->duplex != DUPLEX_FULL) {
7661                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7662                                 return -EINVAL;
7663                         }
7664
7665                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7666                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7667                                 return -EINVAL;
7668                         }
7669
7670                         advertising = (ADVERTISED_10000baseT_Full |
7671                                        ADVERTISED_FIBRE);
7672                         break;
7673
7674                 default:
7675                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7676                         return -EINVAL;
7677                 }
7678
7679                 bp->link_params.req_line_speed = cmd->speed;
7680                 bp->link_params.req_duplex = cmd->duplex;
7681                 bp->port.advertising = advertising;
7682         }
7683
7684         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7685            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7686            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7687            bp->port.advertising);
7688
7689         if (netif_running(dev)) {
7690                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7691                 bnx2x_link_set(bp);
7692         }
7693
7694         return 0;
7695 }
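
/*
 * Usage sketch: a forced-speed request such as
 *   ethtool -s ethX speed 10000 duplex full autoneg off
 * takes the forced-speed branch above, while "autoneg on" requires
 * SUPPORTED_Autoneg and intersects the requested advertising mask
 * with bp->port.supported.
 */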
7696
7697 #define PHY_FW_VER_LEN                  10
7698
7699 static void bnx2x_get_drvinfo(struct net_device *dev,
7700                               struct ethtool_drvinfo *info)
7701 {
7702         struct bnx2x *bp = netdev_priv(dev);
7703         char phy_fw_ver[PHY_FW_VER_LEN];
7704
7705         strcpy(info->driver, DRV_MODULE_NAME);
7706         strcpy(info->version, DRV_MODULE_VERSION);
7707
7708         phy_fw_ver[0] = '\0';
7709         if (bp->port.pmf) {
7710                 bnx2x_acquire_phy_lock(bp);
7711                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7712                                              (bp->state != BNX2X_STATE_CLOSED),
7713                                              phy_fw_ver, PHY_FW_VER_LEN);
7714                 bnx2x_release_phy_lock(bp);
7715         }
7716
7717         snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7718                  BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7719                  BCM_5710_FW_REVISION_VERSION,
7720                  BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7721                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7722         strcpy(info->bus_info, pci_name(bp->pdev));
7723         info->n_stats = BNX2X_NUM_STATS;
7724         info->testinfo_len = BNX2X_NUM_TESTS;
7725         info->eedump_len = bp->common.flash_size;
7726         info->regdump_len = 0;
7727 }
7728
7729 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7730 {
7731         struct bnx2x *bp = netdev_priv(dev);
7732
7733         if (bp->flags & NO_WOL_FLAG) {
7734                 wol->supported = 0;
7735                 wol->wolopts = 0;
7736         } else {
7737                 wol->supported = WAKE_MAGIC;
7738                 if (bp->wol)
7739                         wol->wolopts = WAKE_MAGIC;
7740                 else
7741                         wol->wolopts = 0;
7742         }
7743         memset(&wol->sopass, 0, sizeof(wol->sopass));
7744 }
7745
7746 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7747 {
7748         struct bnx2x *bp = netdev_priv(dev);
7749
7750         if (wol->wolopts & ~WAKE_MAGIC)
7751                 return -EINVAL;
7752
7753         if (wol->wolopts & WAKE_MAGIC) {
7754                 if (bp->flags & NO_WOL_FLAG)
7755                         return -EINVAL;
7756
7757                 bp->wol = 1;
7758         } else
7759                 bp->wol = 0;
7760
7761         return 0;
7762 }
7763
7764 static u32 bnx2x_get_msglevel(struct net_device *dev)
7765 {
7766         struct bnx2x *bp = netdev_priv(dev);
7767
7768         return bp->msglevel;
7769 }
7770
7771 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7772 {
7773         struct bnx2x *bp = netdev_priv(dev);
7774
7775         if (capable(CAP_NET_ADMIN))
7776                 bp->msglevel = level;
7777 }
7778
7779 static int bnx2x_nway_reset(struct net_device *dev)
7780 {
7781         struct bnx2x *bp = netdev_priv(dev);
7782
7783         if (!bp->port.pmf)
7784                 return 0;
7785
7786         if (netif_running(dev)) {
7787                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7788                 bnx2x_link_set(bp);
7789         }
7790
7791         return 0;
7792 }
7793
7794 static int bnx2x_get_eeprom_len(struct net_device *dev)
7795 {
7796         struct bnx2x *bp = netdev_priv(dev);
7797
7798         return bp->common.flash_size;
7799 }
7800
7801 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7802 {
7803         int port = BP_PORT(bp);
7804         int count, i;
7805         u32 val = 0;
7806
7807         /* adjust timeout for emulation/FPGA */
7808         count = NVRAM_TIMEOUT_COUNT;
7809         if (CHIP_REV_IS_SLOW(bp))
7810                 count *= 100;
7811
7812         /* request access to nvram interface */
7813         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7814                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7815
7816         for (i = 0; i < count*10; i++) {
7817                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7818                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7819                         break;
7820
7821                 udelay(5);
7822         }
7823
7824         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7825                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7826                 return -EBUSY;
7827         }
7828
7829         return 0;
7830 }
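
/*
 * Timing sketch for the poll loop above: count*10 iterations with a
 * 5 us delay each bound the wait to roughly NVRAM_TIMEOUT_COUNT * 50 us,
 * stretched 100x on emulation/FPGA where CHIP_REV_IS_SLOW() is true.
 */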
7831
7832 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7833 {
7834         int port = BP_PORT(bp);
7835         int count, i;
7836         u32 val = 0;
7837
7838         /* adjust timeout for emulation/FPGA */
7839         count = NVRAM_TIMEOUT_COUNT;
7840         if (CHIP_REV_IS_SLOW(bp))
7841                 count *= 100;
7842
7843         /* relinquish nvram interface */
7844         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7845                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7846
7847         for (i = 0; i < count*10; i++) {
7848                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7849                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7850                         break;
7851
7852                 udelay(5);
7853         }
7854
7855         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7856                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7857                 return -EBUSY;
7858         }
7859
7860         return 0;
7861 }
7862
7863 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7864 {
7865         u32 val;
7866
7867         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7868
7869         /* enable both bits, even on read */
7870         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7871                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7872                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7873 }
7874
7875 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7876 {
7877         u32 val;
7878
7879         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7880
7881         /* disable both bits, even after read */
7882         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7883                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7884                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7885 }
7886
7887 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7888                                   u32 cmd_flags)
7889 {
7890         int count, i, rc;
7891         u32 val;
7892
7893         /* build the command word */
7894         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7895
7896         /* need to clear DONE bit separately */
7897         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7898
7899         /* address of the NVRAM to read from */
7900         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7901                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7902
7903         /* issue a read command */
7904         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7905
7906         /* adjust timeout for emulation/FPGA */
7907         count = NVRAM_TIMEOUT_COUNT;
7908         if (CHIP_REV_IS_SLOW(bp))
7909                 count *= 100;
7910
7911         /* wait for completion */
7912         *ret_val = 0;
7913         rc = -EBUSY;
7914         for (i = 0; i < count; i++) {
7915                 udelay(5);
7916                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7917
7918                 if (val & MCPR_NVM_COMMAND_DONE) {
7919                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7920                         /* we read nvram data in cpu order
7921                          * but ethtool sees it as an array of bytes;
7922                          * converting to big-endian will do the work */
7923                         val = cpu_to_be32(val);
7924                         *ret_val = val;
7925                         rc = 0;
7926                         break;
7927                 }
7928         }
7929
7930         return rc;
7931 }
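
/*
 * Endianness illustration for the read above: a flash dword read as
 * 0x12345678 is stored to the caller's buffer as the bytes
 * 0x12 0x34 0x56 0x78 regardless of host endianness, which is what
 * ethtool expects when it treats the buffer as a byte array.
 */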
7932
7933 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7934                             int buf_size)
7935 {
7936         int rc;
7937         u32 cmd_flags;
7938         u32 val;
7939
7940         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7941                 DP(BNX2X_MSG_NVM,
7942                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7943                    offset, buf_size);
7944                 return -EINVAL;
7945         }
7946
7947         if (offset + buf_size > bp->common.flash_size) {
7948                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7949                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7950                    offset, buf_size, bp->common.flash_size);
7951                 return -EINVAL;
7952         }
7953
7954         /* request access to nvram interface */
7955         rc = bnx2x_acquire_nvram_lock(bp);
7956         if (rc)
7957                 return rc;
7958
7959         /* enable access to nvram interface */
7960         bnx2x_enable_nvram_access(bp);
7961
7962         /* read the first word(s) */
7963         cmd_flags = MCPR_NVM_COMMAND_FIRST;
7964         while ((buf_size > sizeof(u32)) && (rc == 0)) {
7965                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7966                 memcpy(ret_buf, &val, 4);
7967
7968                 /* advance to the next dword */
7969                 offset += sizeof(u32);
7970                 ret_buf += sizeof(u32);
7971                 buf_size -= sizeof(u32);
7972                 cmd_flags = 0;
7973         }
7974
7975         if (rc == 0) {
7976                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7977                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7978                 memcpy(ret_buf, &val, 4);
7979         }
7980
7981         /* disable access to nvram interface */
7982         bnx2x_disable_nvram_access(bp);
7983         bnx2x_release_nvram_lock(bp);
7984
7985         return rc;
7986 }
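
/*
 * Illustration of the FIRST/LAST framing above: a 12-byte read issues
 * three dword commands flagged FIRST, 0 and LAST respectively; the
 * loop handles all but the final dword, which always carries
 * MCPR_NVM_COMMAND_LAST.
 */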
7987
7988 static int bnx2x_get_eeprom(struct net_device *dev,
7989                             struct ethtool_eeprom *eeprom, u8 *eebuf)
7990 {
7991         struct bnx2x *bp = netdev_priv(dev);
7992         int rc;
7993
7994         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7995            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
7996            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7997            eeprom->len, eeprom->len);
7998
7999         /* parameters already validated in ethtool_get_eeprom */
8000
8001         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8002
8003         return rc;
8004 }
8005
8006 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8007                                    u32 cmd_flags)
8008 {
8009         int count, i, rc;
8010
8011         /* build the command word */
8012         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8013
8014         /* need to clear DONE bit separately */
8015         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8016
8017         /* write the data */
8018         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8019
8020         /* address of the NVRAM to write to */
8021         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8022                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8023
8024         /* issue the write command */
8025         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8026
8027         /* adjust timeout for emulation/FPGA */
8028         count = NVRAM_TIMEOUT_COUNT;
8029         if (CHIP_REV_IS_SLOW(bp))
8030                 count *= 100;
8031
8032         /* wait for completion */
8033         rc = -EBUSY;
8034         for (i = 0; i < count; i++) {
8035                 udelay(5);
8036                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8037                 if (val & MCPR_NVM_COMMAND_DONE) {
8038                         rc = 0;
8039                         break;
8040                 }
8041         }
8042
8043         return rc;
8044 }
8045
8046 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
8047
8048 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8049                               int buf_size)
8050 {
8051         int rc;
8052         u32 cmd_flags;
8053         u32 align_offset;
8054         u32 val;
8055
8056         if (offset + buf_size > bp->common.flash_size) {
8057                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8058                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8059                    offset, buf_size, bp->common.flash_size);
8060                 return -EINVAL;
8061         }
8062
8063         /* request access to nvram interface */
8064         rc = bnx2x_acquire_nvram_lock(bp);
8065         if (rc)
8066                 return rc;
8067
8068         /* enable access to nvram interface */
8069         bnx2x_enable_nvram_access(bp);
8070
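        /* a single-byte write is done as a read-modify-write of the aligned
         * dword: read it, splice the new byte into its lane, then write the
         * dword back as one FIRST|LAST transaction
         */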
8071         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8072         align_offset = (offset & ~0x03);
8073         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8074
8075         if (rc == 0) {
8076                 val &= ~(0xff << BYTE_OFFSET(offset));
8077                 val |= (*data_buf << BYTE_OFFSET(offset));
8078
8079                 /* nvram data is returned as an array of bytes;
8080                  * convert it back to cpu order */
8081                 val = be32_to_cpu(val);
8082
8083                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8084                                              cmd_flags);
8085         }
8086
8087         /* disable access to nvram interface */
8088         bnx2x_disable_nvram_access(bp);
8089         bnx2x_release_nvram_lock(bp);
8090
8091         return rc;
8092 }
8093
8094 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8095                              int buf_size)
8096 {
8097         int rc;
8098         u32 cmd_flags;
8099         u32 val;
8100         u32 written_so_far;
8101
8102         if (buf_size == 1)      /* ethtool */
8103                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8104
8105         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8106                 DP(BNX2X_MSG_NVM,
8107                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8108                    offset, buf_size);
8109                 return -EINVAL;
8110         }
8111
8112         if (offset + buf_size > bp->common.flash_size) {
8113                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8114                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8115                    offset, buf_size, bp->common.flash_size);
8116                 return -EINVAL;
8117         }
8118
8119         /* request access to nvram interface */
8120         rc = bnx2x_acquire_nvram_lock(bp);
8121         if (rc)
8122                 return rc;
8123
8124         /* enable access to nvram interface */
8125         bnx2x_enable_nvram_access(bp);
8126
8127         written_so_far = 0;
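        /* each burst is bracketed by FIRST/LAST command flags and may not
         * cross an NVRAM_PAGE_SIZE boundary: the dword that ends a page is
         * marked LAST and the dword that starts the next page FIRST again
         */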
8128         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8129         while ((written_so_far < buf_size) && (rc == 0)) {
8130                 if (written_so_far == (buf_size - sizeof(u32)))
8131                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8132                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8133                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8134                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8135                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8136
8137                 memcpy(&val, data_buf, 4);
8138
8139                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8140
8141                 /* advance to the next dword */
8142                 offset += sizeof(u32);
8143                 data_buf += sizeof(u32);
8144                 written_so_far += sizeof(u32);
8145                 cmd_flags = 0;
8146         }
8147
8148         /* disable access to nvram interface */
8149         bnx2x_disable_nvram_access(bp);
8150         bnx2x_release_nvram_lock(bp);
8151
8152         return rc;
8153 }
8154
8155 static int bnx2x_set_eeprom(struct net_device *dev,
8156                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8157 {
8158         struct bnx2x *bp = netdev_priv(dev);
8159         int rc;
8160
8161         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8162            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8163            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8164            eeprom->len, eeprom->len);
8165
8166         /* parameters already validated in ethtool_set_eeprom */
8167
8168         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8169         if (eeprom->magic == 0x00504859)
8170                 if (bp->port.pmf) {
8171
8172                         bnx2x_acquire_phy_lock(bp);
8173                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8174                                              bp->link_params.ext_phy_config,
8175                                              (bp->state != BNX2X_STATE_CLOSED),
8176                                              eebuf, eeprom->len);
8177                         if ((bp->state == BNX2X_STATE_OPEN) ||
8178                             (bp->state == BNX2X_STATE_DISABLED)) {
8179                                 rc |= bnx2x_link_reset(&bp->link_params,
8180                                                        &bp->link_vars);
8181                                 rc |= bnx2x_phy_init(&bp->link_params,
8182                                                      &bp->link_vars);
8183                         }
8184                         bnx2x_release_phy_lock(bp);
8185
8186                 } else /* Only the PMF can access the PHY */
8187                         return -EINVAL;
8188         else
8189                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8190
8191         return rc;
8192 }
8193
8194 static int bnx2x_get_coalesce(struct net_device *dev,
8195                               struct ethtool_coalesce *coal)
8196 {
8197         struct bnx2x *bp = netdev_priv(dev);
8198
8199         memset(coal, 0, sizeof(struct ethtool_coalesce));
8200
8201         coal->rx_coalesce_usecs = bp->rx_ticks;
8202         coal->tx_coalesce_usecs = bp->tx_ticks;
8203
8204         return 0;
8205 }
8206
8207 static int bnx2x_set_coalesce(struct net_device *dev,
8208                               struct ethtool_coalesce *coal)
8209 {
8210         struct bnx2x *bp = netdev_priv(dev);
8211
8212         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8213         if (bp->rx_ticks > 3000)
8214                 bp->rx_ticks = 3000;
8215
8216         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8217         if (bp->tx_ticks > 3000)
8218                 bp->tx_ticks = 3000;
8219
8220         if (netif_running(dev))
8221                 bnx2x_update_coalesce(bp);
8222
8223         return 0;
8224 }
8225
8226 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8227 {
8228         struct bnx2x *bp = netdev_priv(dev);
8229         int changed = 0;
8230         int rc = 0;
8231
8232         if (data & ETH_FLAG_LRO) {
8233                 if (!(dev->features & NETIF_F_LRO)) {
8234                         dev->features |= NETIF_F_LRO;
8235                         bp->flags |= TPA_ENABLE_FLAG;
8236                         changed = 1;
8237                 }
8238
8239         } else if (dev->features & NETIF_F_LRO) {
8240                 dev->features &= ~NETIF_F_LRO;
8241                 bp->flags &= ~TPA_ENABLE_FLAG;
8242                 changed = 1;
8243         }
8244
8245         if (changed && netif_running(dev)) {
8246                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8247                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8248         }
8249
8250         return rc;
8251 }
8252
8253 static void bnx2x_get_ringparam(struct net_device *dev,
8254                                 struct ethtool_ringparam *ering)
8255 {
8256         struct bnx2x *bp = netdev_priv(dev);
8257
8258         ering->rx_max_pending = MAX_RX_AVAIL;
8259         ering->rx_mini_max_pending = 0;
8260         ering->rx_jumbo_max_pending = 0;
8261
8262         ering->rx_pending = bp->rx_ring_size;
8263         ering->rx_mini_pending = 0;
8264         ering->rx_jumbo_pending = 0;
8265
8266         ering->tx_max_pending = MAX_TX_AVAIL;
8267         ering->tx_pending = bp->tx_ring_size;
8268 }
8269
8270 static int bnx2x_set_ringparam(struct net_device *dev,
8271                                struct ethtool_ringparam *ering)
8272 {
8273         struct bnx2x *bp = netdev_priv(dev);
8274         int rc = 0;
8275
8276         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8277             (ering->tx_pending > MAX_TX_AVAIL) ||
8278             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8279                 return -EINVAL;
8280
8281         bp->rx_ring_size = ering->rx_pending;
8282         bp->tx_ring_size = ering->tx_pending;
8283
8284         if (netif_running(dev)) {
8285                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8286                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8287         }
8288
8289         return rc;
8290 }
8291
8292 static void bnx2x_get_pauseparam(struct net_device *dev,
8293                                  struct ethtool_pauseparam *epause)
8294 {
8295         struct bnx2x *bp = netdev_priv(dev);
8296
8297         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8298                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8299
8300         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8301                             FLOW_CTRL_RX);
8302         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8303                             FLOW_CTRL_TX);
8304
8305         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8306            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8307            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8308 }
8309
8310 static int bnx2x_set_pauseparam(struct net_device *dev,
8311                                 struct ethtool_pauseparam *epause)
8312 {
8313         struct bnx2x *bp = netdev_priv(dev);
8314
8315         if (IS_E1HMF(bp))
8316                 return 0;
8317
8318         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8319            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8320            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8321
8322         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8323
8324         if (epause->rx_pause)
8325                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8326
8327         if (epause->tx_pause)
8328                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8329
8330         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8331                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8332
8333         if (epause->autoneg) {
8334                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8335                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8336                         return -EINVAL;
8337                 }
8338
8339                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8340                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8341         }
8342
8343         DP(NETIF_MSG_LINK,
8344            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8345
8346         if (netif_running(dev)) {
8347                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8348                 bnx2x_link_set(bp);
8349         }
8350
8351         return 0;
8352 }
8353
8354 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8355 {
8356         struct bnx2x *bp = netdev_priv(dev);
8357
8358         return bp->rx_csum;
8359 }
8360
8361 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8362 {
8363         struct bnx2x *bp = netdev_priv(dev);
8364
8365         bp->rx_csum = data;
8366         return 0;
8367 }
8368
8369 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8370 {
8371         if (data) {
8372                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8373                 dev->features |= NETIF_F_TSO6;
8374         } else {
8375                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8376                 dev->features &= ~NETIF_F_TSO6;
8377         }
8378
8379         return 0;
8380 }
8381
8382 static const struct {
8383         char string[ETH_GSTRING_LEN];
8384 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8385         { "register_test (offline)" },
8386         { "memory_test (offline)" },
8387         { "loopback_test (offline)" },
8388         { "nvram_test (online)" },
8389         { "interrupt_test (online)" },
8390         { "link_test (online)" },
8391         { "idle check (online)" },
8392         { "MC errors (online)" }
8393 };
8394
8395 static int bnx2x_self_test_count(struct net_device *dev)
8396 {
8397         return BNX2X_NUM_TESTS;
8398 }
8399
8400 static int bnx2x_test_registers(struct bnx2x *bp)
8401 {
8402         int idx, i, rc = -ENODEV;
8403         u32 wr_val = 0;
8404         int port = BP_PORT(bp);
8405         static const struct {
8406                 u32  offset0;
8407                 u32  offset1;
8408                 u32  mask;
8409         } reg_tbl[] = {
8410 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8411                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8412                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8413                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8414                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8415                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8416                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8417                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8418                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8419                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8420 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8421                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8422                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8423                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8424                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8425                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8426                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8427                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8428                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8429                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8430 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8431                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8432                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8433                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8434                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8435                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8436                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8437                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8438                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8439                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8440 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8441                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8442                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8443                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8444                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8445                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8446                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8447                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8448
8449                 { 0xffffffff, 0, 0x00000000 }
8450         };
8451
8452         if (!netif_running(bp->dev))
8453                 return rc;
8454
8455         /* Repeat the test twice:
8456            First by writing 0x00000000, second by writing 0xffffffff */
8457         for (idx = 0; idx < 2; idx++) {
8458
8459                 switch (idx) {
8460                 case 0:
8461                         wr_val = 0;
8462                         break;
8463                 case 1:
8464                         wr_val = 0xffffffff;
8465                         break;
8466                 }
8467
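                /* offset1 is the per-port register stride: port 1 registers
                 * sit offset1 bytes above their port 0 counterparts, so one
                 * table serves both ports
                 */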
8468                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8469                         u32 offset, mask, save_val, val;
8470
8471                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8472                         mask = reg_tbl[i].mask;
8473
8474                         save_val = REG_RD(bp, offset);
8475
8476                         REG_WR(bp, offset, wr_val);
8477                         val = REG_RD(bp, offset);
8478
8479                         /* Restore the original register's value */
8480                         REG_WR(bp, offset, save_val);
8481
8482                         /* verify the value is as expected */
8483                         if ((val & mask) != (wr_val & mask))
8484                                 goto test_reg_exit;
8485                 }
8486         }
8487
8488         rc = 0;
8489
8490 test_reg_exit:
8491         return rc;
8492 }
8493
8494 static int bnx2x_test_memory(struct bnx2x *bp)
8495 {
8496         int i, j, rc = -ENODEV;
8497         u32 val;
8498         static const struct {
8499                 u32 offset;
8500                 int size;
8501         } mem_tbl[] = {
8502                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8503                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8504                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8505                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8506                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8507                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8508                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8509
8510                 { 0xffffffff, 0 }
8511         };
8512         static const struct {
8513                 char *name;
8514                 u32 offset;
8515                 u32 e1_mask;
8516                 u32 e1h_mask;
8517         } prty_tbl[] = {
8518                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8519                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8520                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8521                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8522                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8523                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8524
8525                 { NULL, 0xffffffff, 0, 0 }
8526         };
8527
8528         if (!netif_running(bp->dev))
8529                 return rc;
8530
8531         /* Go through all the memories */
8532         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8533                 for (j = 0; j < mem_tbl[i].size; j++)
8534                         REG_RD(bp, mem_tbl[i].offset + j*4);
8535
8536         /* Check the parity status */
8537         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8538                 val = REG_RD(bp, prty_tbl[i].offset);
8539                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8540                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8541                         DP(NETIF_MSG_HW,
8542                            "%s is 0x%x\n", prty_tbl[i].name, val);
8543                         goto test_mem_exit;
8544                 }
8545         }
8546
8547         rc = 0;
8548
8549 test_mem_exit:
8550         return rc;
8551 }
8552
8553 static void bnx2x_netif_start(struct bnx2x *bp)
8554 {
8555         int i;
8556
8557         if (atomic_dec_and_test(&bp->intr_sem)) {
8558                 if (netif_running(bp->dev)) {
8559                         bnx2x_int_enable(bp);
8560                         for_each_queue(bp, i)
8561                                 napi_enable(&bnx2x_fp(bp, i, napi));
8562                         if (bp->state == BNX2X_STATE_OPEN)
8563                                 netif_wake_queue(bp->dev);
8564                 }
8565         }
8566 }
8567
8568 static void bnx2x_netif_stop(struct bnx2x *bp)
8569 {
8570         int i;
8571
8572         if (netif_running(bp->dev)) {
8573                 netif_tx_disable(bp->dev);
8574                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8575                 for_each_queue(bp, i)
8576                         napi_disable(&bnx2x_fp(bp, i, napi));
8577         }
8578         bnx2x_int_disable_sync(bp);
8579 }
8580
8581 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8582 {
8583         int cnt = 1000;
8584
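        /* poll every 10ms - worst case this waits 1000 * 10ms = 10 seconds
           for the link to come back up */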
8585         if (link_up)
8586                 while (bnx2x_link_test(bp) && cnt--)
8587                         msleep(10);
8588 }
8589
8590 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8591 {
8592         unsigned int pkt_size, num_pkts, i;
8593         struct sk_buff *skb;
8594         unsigned char *packet;
8595         struct bnx2x_fastpath *fp = &bp->fp[0];
8596         u16 tx_start_idx, tx_idx;
8597         u16 rx_start_idx, rx_idx;
8598         u16 pkt_prod;
8599         struct sw_tx_bd *tx_buf;
8600         struct eth_tx_bd *tx_bd;
8601         dma_addr_t mapping;
8602         union eth_rx_cqe *cqe;
8603         u8 cqe_fp_flags;
8604         struct sw_rx_bd *rx_buf;
8605         u16 len;
8606         int rc = -ENODEV;
8607
8608         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8609                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8610                 bnx2x_acquire_phy_lock(bp);
8611                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8612                 bnx2x_release_phy_lock(bp);
8613
8614         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8615                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8616                 bnx2x_acquire_phy_lock(bp);
8617                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8618                 bnx2x_release_phy_lock(bp);
8619                 /* wait until link state is restored */
8620                 bnx2x_wait_for_link(bp, link_up);
8621
8622         } else
8623                 return -EINVAL;
8624
8625         pkt_size = 1514;
8626         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8627         if (!skb) {
8628                 rc = -ENOMEM;
8629                 goto test_loopback_exit;
8630         }
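        /* build a self-addressed frame: our own MAC as destination, zeroed
         * rest of the header, and a counting byte pattern in the payload
         * that the receive side verifies byte-for-byte
         */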
8631         packet = skb_put(skb, pkt_size);
8632         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8633         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8634         for (i = ETH_HLEN; i < pkt_size; i++)
8635                 packet[i] = (unsigned char) (i & 0xff);
8636
8637         num_pkts = 0;
8638         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8639         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8640
8641         pkt_prod = fp->tx_pkt_prod++;
8642         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8643         tx_buf->first_bd = fp->tx_bd_prod;
8644         tx_buf->skb = skb;
8645
8646         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8647         mapping = pci_map_single(bp->pdev, skb->data,
8648                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8649         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8650         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8651         tx_bd->nbd = cpu_to_le16(1);
8652         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8653         tx_bd->vlan = cpu_to_le16(pkt_prod);
8654         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8655                                        ETH_TX_BD_FLAGS_END_BD);
8656         tx_bd->general_data = ((UNICAST_ADDRESS <<
8657                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8658
8659         fp->hw_tx_prods->bds_prod =
8660                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8661         mb(); /* FW restriction: must not reorder writing nbd and packets */
8662         fp->hw_tx_prods->packets_prod =
8663                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8664         DOORBELL(bp, FP_IDX(fp), 0);
8665
8666         mmiowb();
8667
8668         num_pkts++;
8669         fp->tx_bd_prod++;
8670         bp->dev->trans_start = jiffies;
8671
8672         udelay(100);
8673
8674         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8675         if (tx_idx != tx_start_idx + num_pkts)
8676                 goto test_loopback_exit;
8677
8678         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8679         if (rx_idx != rx_start_idx + num_pkts)
8680                 goto test_loopback_exit;
8681
8682         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8683         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8684         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8685                 goto test_loopback_rx_exit;
8686
8687         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8688         if (len != pkt_size)
8689                 goto test_loopback_rx_exit;
8690
8691         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8692         skb = rx_buf->skb;
8693         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8694         for (i = ETH_HLEN; i < pkt_size; i++)
8695                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8696                         goto test_loopback_rx_exit;
8697
8698         rc = 0;
8699
8700 test_loopback_rx_exit:
8701         bp->dev->last_rx = jiffies;
8702
8703         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8704         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8705         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8706         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8707
8708         /* Update producers */
8709         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8710                              fp->rx_sge_prod);
8711         mmiowb(); /* keep prod updates ordered */
8712
8713 test_loopback_exit:
8714         bp->link_params.loopback_mode = LOOPBACK_NONE;
8715
8716         return rc;
8717 }
8718
8719 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8720 {
8721         int rc = 0;
8722
8723         if (!netif_running(bp->dev))
8724                 return BNX2X_LOOPBACK_FAILED;
8725
8726         bnx2x_netif_stop(bp);
8727
8728         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8729                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8730                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8731         }
8732
8733         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8734                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8735                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8736         }
8737
8738         bnx2x_netif_start(bp);
8739
8740         return rc;
8741 }
8742
8743 #define CRC32_RESIDUAL                  0xdebb20e3
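/* running CRC32 over a block together with its stored (inverted) CRC always
 * leaves this constant residue, so each nvram_tbl region below is intact
 * when ether_crc_le() over the whole region equals CRC32_RESIDUAL
 */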
8744
8745 static int bnx2x_test_nvram(struct bnx2x *bp)
8746 {
8747         static const struct {
8748                 int offset;
8749                 int size;
8750         } nvram_tbl[] = {
8751                 {     0,  0x14 }, /* bootstrap */
8752                 {  0x14,  0xec }, /* dir */
8753                 { 0x100, 0x350 }, /* manuf_info */
8754                 { 0x450,  0xf0 }, /* feature_info */
8755                 { 0x640,  0x64 }, /* upgrade_key_info */
8756                 { 0x6a4,  0x64 },
8757                 { 0x708,  0x70 }, /* manuf_key_info */
8758                 { 0x778,  0x70 },
8759                 {     0,     0 }
8760         };
8761         u32 buf[0x350 / 4];
8762         u8 *data = (u8 *)buf;
8763         int i, rc;
8764         u32 magic, csum;
8765
8766         rc = bnx2x_nvram_read(bp, 0, data, 4);
8767         if (rc) {
8768                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8769                 goto test_nvram_exit;
8770         }
8771
8772         magic = be32_to_cpu(buf[0]);
8773         if (magic != 0x669955aa) {
8774                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8775                 rc = -ENODEV;
8776                 goto test_nvram_exit;
8777         }
8778
8779         for (i = 0; nvram_tbl[i].size; i++) {
8780
8781                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8782                                       nvram_tbl[i].size);
8783                 if (rc) {
8784                         DP(NETIF_MSG_PROBE,
8785                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8786                         goto test_nvram_exit;
8787                 }
8788
8789                 csum = ether_crc_le(nvram_tbl[i].size, data);
8790                 if (csum != CRC32_RESIDUAL) {
8791                         DP(NETIF_MSG_PROBE,
8792                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8793                         rc = -ENODEV;
8794                         goto test_nvram_exit;
8795                 }
8796         }
8797
8798 test_nvram_exit:
8799         return rc;
8800 }
8801
8802 static int bnx2x_test_intr(struct bnx2x *bp)
8803 {
8804         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8805         int i, rc;
8806
8807         if (!netif_running(bp->dev))
8808                 return -ENODEV;
8809
8810         config->hdr.length_6b = 0;
8811         config->hdr.offset = 0;
8812         config->hdr.client_id = BP_CL_ID(bp);
8813         config->hdr.reserved1 = 0;
8814
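        /* post an empty set-MAC ramrod purely to trigger a slowpath
         * completion: if the interrupt path works, set_mac_pending is
         * cleared within the ~100ms polling window below
         */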
8815         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8816                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8817                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8818         if (rc == 0) {
8819                 bp->set_mac_pending++;
8820                 for (i = 0; i < 10; i++) {
8821                         if (!bp->set_mac_pending)
8822                                 break;
8823                         msleep_interruptible(10);
8824                 }
8825                 if (i == 10)
8826                         rc = -ENODEV;
8827         }
8828
8829         return rc;
8830 }
8831
8832 static void bnx2x_self_test(struct net_device *dev,
8833                             struct ethtool_test *etest, u64 *buf)
8834 {
8835         struct bnx2x *bp = netdev_priv(dev);
8836
8837         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8838
8839         if (!netif_running(dev))
8840                 return;
8841
8842         /* offline tests are not supported in MF mode */
8843         if (IS_E1HMF(bp))
8844                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8845
8846         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8847                 u8 link_up;
8848
8849                 link_up = bp->link_vars.link_up;
8850                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8851                 bnx2x_nic_load(bp, LOAD_DIAG);
8852                 /* wait until link state is restored */
8853                 bnx2x_wait_for_link(bp, link_up);
8854
8855                 if (bnx2x_test_registers(bp) != 0) {
8856                         buf[0] = 1;
8857                         etest->flags |= ETH_TEST_FL_FAILED;
8858                 }
8859                 if (bnx2x_test_memory(bp) != 0) {
8860                         buf[1] = 1;
8861                         etest->flags |= ETH_TEST_FL_FAILED;
8862                 }
8863                 buf[2] = bnx2x_test_loopback(bp, link_up);
8864                 if (buf[2] != 0)
8865                         etest->flags |= ETH_TEST_FL_FAILED;
8866
8867                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8868                 bnx2x_nic_load(bp, LOAD_NORMAL);
8869                 /* wait until link state is restored */
8870                 bnx2x_wait_for_link(bp, link_up);
8871         }
8872         if (bnx2x_test_nvram(bp) != 0) {
8873                 buf[3] = 1;
8874                 etest->flags |= ETH_TEST_FL_FAILED;
8875         }
8876         if (bnx2x_test_intr(bp) != 0) {
8877                 buf[4] = 1;
8878                 etest->flags |= ETH_TEST_FL_FAILED;
8879         }
8880         if (bp->port.pmf)
8881                 if (bnx2x_link_test(bp) != 0) {
8882                         buf[5] = 1;
8883                         etest->flags |= ETH_TEST_FL_FAILED;
8884                 }
8885         buf[7] = bnx2x_mc_assert(bp);
8886         if (buf[7] != 0)
8887                 etest->flags |= ETH_TEST_FL_FAILED;
8888
8889 #ifdef BNX2X_EXTRA_DEBUG
8890         bnx2x_panic_dump(bp);
8891 #endif
8892 }
8893
8894 static const struct {
8895         long offset;
8896         int size;
8897         u32 flags;
8898 #define STATS_FLAGS_PORT                1
8899 #define STATS_FLAGS_FUNC                2
8900         u8 string[ETH_GSTRING_LEN];
8901 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8902 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8903                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8904         { STATS_OFFSET32(error_bytes_received_hi),
8905                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8906         { STATS_OFFSET32(total_bytes_transmitted_hi),
8907                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8908         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8909                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8910         { STATS_OFFSET32(total_unicast_packets_received_hi),
8911                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8912         { STATS_OFFSET32(total_multicast_packets_received_hi),
8913                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8914         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8915                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8916         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8917                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8918         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8919                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8920 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8921                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8922         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8923                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8924         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8925                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8926         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8927                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8928         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8929                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8930         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8931                                 8, STATS_FLAGS_PORT, "tx_deferred" },
8932         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8933                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8934         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8935                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8936         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8937                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8938         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8939                                 8, STATS_FLAGS_PORT, "rx_fragments" },
8940 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8941                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
8942         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8943                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8944         { STATS_OFFSET32(jabber_packets_received),
8945                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8946         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8947                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8948         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8949                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8950         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8951                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8952         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8953                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8954         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8955                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8956         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8957                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8958         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8959                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8960 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8961                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8962         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8963                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8964         { STATS_OFFSET32(tx_stat_outxonsent_hi),
8965                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8966         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8967                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8968         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8969                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8970         { STATS_OFFSET32(mac_filter_discard),
8971                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8972         { STATS_OFFSET32(no_buff_discard),
8973                                 4, STATS_FLAGS_FUNC, "rx_discards" },
8974         { STATS_OFFSET32(xxoverflow_discard),
8975                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8976         { STATS_OFFSET32(brb_drop_hi),
8977                                 8, STATS_FLAGS_PORT, "brb_discard" },
8978         { STATS_OFFSET32(brb_truncate_hi),
8979                                 8, STATS_FLAGS_PORT, "brb_truncate" },
8980 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8981                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8982         { STATS_OFFSET32(rx_skb_alloc_failed),
8983                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8984 /* 42 */{ STATS_OFFSET32(hw_csum_err),
8985                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8986 };
8987
8988 #define IS_NOT_E1HMF_STAT(bp, i) \
8989                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
8990
8991 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8992 {
8993         struct bnx2x *bp = netdev_priv(dev);
8994         int i, j;
8995
8996         switch (stringset) {
8997         case ETH_SS_STATS:
8998                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8999                         if (IS_NOT_E1HMF_STAT(bp, i))
9000                                 continue;
9001                         strcpy(buf + j*ETH_GSTRING_LEN,
9002                                bnx2x_stats_arr[i].string);
9003                         j++;
9004                 }
9005                 break;
9006
9007         case ETH_SS_TEST:
9008                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9009                 break;
9010         }
9011 }
9012
9013 static int bnx2x_get_stats_count(struct net_device *dev)
9014 {
9015         struct bnx2x *bp = netdev_priv(dev);
9016         int i, num_stats = 0;
9017
9018         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9019                 if (IS_NOT_E1HMF_STAT(bp, i))
9020                         continue;
9021                 num_stats++;
9022         }
9023         return num_stats;
9024 }
9025
9026 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9027                                     struct ethtool_stats *stats, u64 *buf)
9028 {
9029         struct bnx2x *bp = netdev_priv(dev);
9030         u32 *hw_stats = (u32 *)&bp->eth_stats;
9031         int i, j;
9032
9033         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9034                 if (IS_NOT_E1HMF_STAT(bp, i))
9035                         continue;
9036
9037                 if (bnx2x_stats_arr[i].size == 0) {
9038                         /* skip this counter */
9039                         buf[j] = 0;
9040                         j++;
9041                         continue;
9042                 }
9043                 if (bnx2x_stats_arr[i].size == 4) {
9044                         /* 4-byte counter */
9045                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9046                         j++;
9047                         continue;
9048                 }
9049                 /* 8-byte counter */
9050                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9051                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9052                 j++;
9053         }
9054 }
9055
9056 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9057 {
9058         struct bnx2x *bp = netdev_priv(dev);
9059         int port = BP_PORT(bp);
9060         int i;
9061
9062         if (!netif_running(dev))
9063                 return 0;
9064
9065         if (!bp->port.pmf)
9066                 return 0;
9067
9068         if (data == 0)
9069                 data = 2;
9070
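        /* blink for 'data' seconds: each iteration toggles the LED and
         * sleeps 500ms, so data * 2 iterations give one-second on/off
         * cycles
         */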
9071         for (i = 0; i < (data * 2); i++) {
9072                 if ((i % 2) == 0)
9073                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9074                                       bp->link_params.hw_led_mode,
9075                                       bp->link_params.chip_id);
9076                 else
9077                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9078                                       bp->link_params.hw_led_mode,
9079                                       bp->link_params.chip_id);
9080
9081                 msleep_interruptible(500);
9082                 if (signal_pending(current))
9083                         break;
9084         }
9085
9086         if (bp->link_vars.link_up)
9087                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9088                               bp->link_vars.line_speed,
9089                               bp->link_params.hw_led_mode,
9090                               bp->link_params.chip_id);
9091
9092         return 0;
9093 }
9094
9095 static struct ethtool_ops bnx2x_ethtool_ops = {
9096         .get_settings           = bnx2x_get_settings,
9097         .set_settings           = bnx2x_set_settings,
9098         .get_drvinfo            = bnx2x_get_drvinfo,
9099         .get_wol                = bnx2x_get_wol,
9100         .set_wol                = bnx2x_set_wol,
9101         .get_msglevel           = bnx2x_get_msglevel,
9102         .set_msglevel           = bnx2x_set_msglevel,
9103         .nway_reset             = bnx2x_nway_reset,
9104         .get_link               = ethtool_op_get_link,
9105         .get_eeprom_len         = bnx2x_get_eeprom_len,
9106         .get_eeprom             = bnx2x_get_eeprom,
9107         .set_eeprom             = bnx2x_set_eeprom,
9108         .get_coalesce           = bnx2x_get_coalesce,
9109         .set_coalesce           = bnx2x_set_coalesce,
9110         .get_ringparam          = bnx2x_get_ringparam,
9111         .set_ringparam          = bnx2x_set_ringparam,
9112         .get_pauseparam         = bnx2x_get_pauseparam,
9113         .set_pauseparam         = bnx2x_set_pauseparam,
9114         .get_rx_csum            = bnx2x_get_rx_csum,
9115         .set_rx_csum            = bnx2x_set_rx_csum,
9116         .get_tx_csum            = ethtool_op_get_tx_csum,
9117         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9118         .set_flags              = bnx2x_set_flags,
9119         .get_flags              = ethtool_op_get_flags,
9120         .get_sg                 = ethtool_op_get_sg,
9121         .set_sg                 = ethtool_op_set_sg,
9122         .get_tso                = ethtool_op_get_tso,
9123         .set_tso                = bnx2x_set_tso,
9124         .self_test_count        = bnx2x_self_test_count,
9125         .self_test              = bnx2x_self_test,
9126         .get_strings            = bnx2x_get_strings,
9127         .phys_id                = bnx2x_phys_id,
9128         .get_stats_count        = bnx2x_get_stats_count,
9129         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9130 };
9131
9132 /* end of ethtool_ops */
9133
9134 /****************************************************************************
9135 * General service functions
9136 ****************************************************************************/
9137
9138 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9139 {
9140         u16 pmcsr;
9141
9142         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9143
9144         switch (state) {
9145         case PCI_D0:
9146                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9147                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9148                                        PCI_PM_CTRL_PME_STATUS));
9149
9150                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9151                         /* delay required during transition out of D3hot */
9152                         msleep(20);
9153                 break;
9154
9155         case PCI_D3hot:
9156                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9157                 pmcsr |= 3;
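                /* 3 is the D3hot encoding in the PM_CTRL state field */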
9158
9159                 if (bp->wol)
9160                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9161
9162                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9163                                       pmcsr);
9164
9165                 /* No more memory access after this point until
9166                  * the device is brought back to D0.
9167                  */
9168                 break;
9169
9170         default:
9171                 return -EINVAL;
9172         }
9173         return 0;
9174 }
9175
9176 /*
9177  * net_device service functions
9178  */
9179
9180 static int bnx2x_poll(struct napi_struct *napi, int budget)
9181 {
9182         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9183                                                  napi);
9184         struct bnx2x *bp = fp->bp;
9185         int work_done = 0;
9186
9187 #ifdef BNX2X_STOP_ON_ERROR
9188         if (unlikely(bp->panic))
9189                 goto poll_panic;
9190 #endif
9191
9192         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9193         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9194         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9195
9196         bnx2x_update_fpsb_idx(fp);
9197
9198         if (BNX2X_HAS_TX_WORK(fp))
9199                 bnx2x_tx_int(fp, budget);
9200
9201         if (BNX2X_HAS_RX_WORK(fp))
9202                 work_done = bnx2x_rx_int(fp, budget);
9203
9204         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9205
9206         /* must not complete if we consumed full budget */
9207         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9208
9209 #ifdef BNX2X_STOP_ON_ERROR
9210 poll_panic:
9211 #endif
9212                 netif_rx_complete(bp->dev, napi);
9213
9214                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9215                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9216                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9217                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9218         }
9219         return work_done;
9220 }
9221
9222
9223 /* We split the first BD into headers and data BDs
9224  * to ease the pain of our fellow microcode engineers;
9225  * we use one mapping for both BDs.
9226  * So far this has only been observed to happen
9227  * in Other Operating Systems(TM).
9228  */
9229 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9230                                    struct bnx2x_fastpath *fp,
9231                                    struct eth_tx_bd **tx_bd, u16 hlen,
9232                                    u16 bd_prod, int nbd)
9233 {
9234         struct eth_tx_bd *h_tx_bd = *tx_bd;
9235         struct eth_tx_bd *d_tx_bd;
9236         dma_addr_t mapping;
9237         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9238
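        /* both halves share the original DMA mapping: the header BD keeps
         * bytes [0, hlen) and the data BD points hlen bytes into the same
         * mapping, covering the remaining old_len - hlen bytes
         */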
9239         /* first fix first BD */
9240         h_tx_bd->nbd = cpu_to_le16(nbd);
9241         h_tx_bd->nbytes = cpu_to_le16(hlen);
9242
9243         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9244            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9245            h_tx_bd->addr_lo, h_tx_bd->nbd);
9246
9247         /* now get a new data BD
9248          * (after the pbd) and fill it */
9249         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9250         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9251
9252         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9253                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9254
9255         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9256         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9257         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9258         d_tx_bd->vlan = 0;
9259         /* this marks the BD as one that has no individual mapping
9260          * the FW ignores this flag in a BD not marked start
9261          */
9262         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9263         DP(NETIF_MSG_TX_QUEUED,
9264            "TSO split data size is %d (%x:%x)\n",
9265            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9266
9267         /* update tx_bd for marking the last BD flag */
9268         *tx_bd = d_tx_bd;
9269
9270         return bd_prod;
9271 }
9272
9273 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9274 {
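        /* the hardware checksum covers a span starting 'fix' bytes away
         * from the real transport header: fold surplus bytes out (fix > 0)
         * or missing bytes in (fix < 0), then swab16() into BD byte order
         */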
9275         if (fix > 0)
9276                 csum = (u16) ~csum_fold(csum_sub(csum,
9277                                 csum_partial(t_header - fix, fix, 0)));
9278
9279         else if (fix < 0)
9280                 csum = (u16) ~csum_fold(csum_add(csum,
9281                                 csum_partial(t_header, -fix, 0)));
9282
9283         return swab16(csum);
9284 }
9285
9286 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9287 {
9288         u32 rc;
9289
9290         if (skb->ip_summed != CHECKSUM_PARTIAL)
9291                 rc = XMIT_PLAIN;
9292
9293         else {
9294                 if (skb->protocol == htons(ETH_P_IPV6)) {
9295                         rc = XMIT_CSUM_V6;
9296                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9297                                 rc |= XMIT_CSUM_TCP;
9298
9299                 } else {
9300                         rc = XMIT_CSUM_V4;
9301                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9302                                 rc |= XMIT_CSUM_TCP;
9303                 }
9304         }
9305
9306         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9307                 rc |= XMIT_GSO_V4;
9308
9309         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9310                 rc |= XMIT_GSO_V6;
9311
9312         return rc;
9313 }
9314
9315 /* check if packet requires linearization (packet is too fragmented) */
9316 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9317                              u32 xmit_type)
9318 {
9319         int to_copy = 0;
9320         int hlen = 0;
9321         int first_bd_sz = 0;
9322
9323         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9324         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9325
9326                 if (xmit_type & XMIT_GSO) {
9327                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9328                         /* Check if LSO packet needs to be copied:
9329                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9330                         int wnd_size = MAX_FETCH_BD - 3;
9331                         /* Number of windows to check */
9332                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9333                         int wnd_idx = 0;
9334                         int frag_idx = 0;
9335                         u32 wnd_sum = 0;
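                        /* the FW fetches each LSO segment from at most
                         * wnd_size BDs, so every wnd_size consecutive frags
                         * must together carry at least one MSS of payload;
                         * slide a window along the frag list and linearize
                         * if any window falls short
                         */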
9336
9337                         /* Headers length */
9338                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9339                                 tcp_hdrlen(skb);
9340
9341                         /* Amount of data (w/o headers) on linear part of SKB */
9342                         first_bd_sz = skb_headlen(skb) - hlen;
9343
9344                         wnd_sum  = first_bd_sz;
9345
9346                         /* Calculate the first sum - it's special */
9347                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9348                                 wnd_sum +=
9349                                         skb_shinfo(skb)->frags[frag_idx].size;
9350
9351                         /* If there was data on linear skb data - check it */
9352                         if (first_bd_sz > 0) {
9353                                 if (unlikely(wnd_sum < lso_mss)) {
9354                                         to_copy = 1;
9355                                         goto exit_lbl;
9356                                 }
9357
9358                                 wnd_sum -= first_bd_sz;
9359                         }
9360
9361                         /* Others are easier: run through the frag list and
9362                            check all windows */
9363                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9364                                 wnd_sum +=
9365                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9366
9367                                 if (unlikely(wnd_sum < lso_mss)) {
9368                                         to_copy = 1;
9369                                         break;
9370                                 }
9371                                 wnd_sum -=
9372                                         skb_shinfo(skb)->frags[wnd_idx].size;
9373                         }
9374
9375                 } else {
9376                         /* a non-LSO packet that is too fragmented
9377                            must always be linearized */
9378                         to_copy = 1;
9379                 }
9380         }
9381
9382 exit_lbl:
9383         if (unlikely(to_copy))
9384                 DP(NETIF_MSG_TX_QUEUED,
9385                    "Linearization IS REQUIRED for %s packet. "
9386                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9387                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9388                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9389
9390         return to_copy;
9391 }
9392
9393 /* called with netif_tx_lock
9394  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9395  * netif_wake_queue()
9396  */
9397 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9398 {
9399         struct bnx2x *bp = netdev_priv(dev);
9400         struct bnx2x_fastpath *fp;
9401         struct sw_tx_bd *tx_buf;
9402         struct eth_tx_bd *tx_bd;
9403         struct eth_tx_parse_bd *pbd = NULL;
9404         u16 pkt_prod, bd_prod;
9405         int nbd, fp_index;
9406         dma_addr_t mapping;
9407         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9408         int vlan_off = (bp->e1hov ? 4 : 0);
9409         int i;
9410         u8 hlen = 0;
9411
9412 #ifdef BNX2X_STOP_ON_ERROR
9413         if (unlikely(bp->panic))
9414                 return NETDEV_TX_BUSY;
9415 #endif
9416
9417         fp_index = (smp_processor_id() % bp->num_queues);
9418         fp = &bp->fp[fp_index];
9419
9420         if (unlikely(bnx2x_tx_avail(fp) <
9421                                         (skb_shinfo(skb)->nr_frags + 3))) {
9422                 bp->eth_stats.driver_xoff++;
9423                 netif_stop_queue(dev);
9424                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9425                 return NETDEV_TX_BUSY;
9426         }
9427
9428         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9429            "  gso type %x  xmit_type %x\n",
9430            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9431            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9432
9433         /* First, check if we need to linearize the skb
9434            (due to FW restrictions) */
9435         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9436                 /* Statistics of linearization */
9437                 bp->lin_cnt++;
9438                 if (skb_linearize(skb) != 0) {
9439                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9440                            "silently dropping this SKB\n");
9441                         dev_kfree_skb_any(skb);
9442                         return NETDEV_TX_OK;
9443                 }
9444         }
9445
9446         /*
9447         Please read carefully. First we use one BD which we mark as start,
9448         then for TSO or xsum we have a parsing info BD,
9449         and only then we have the rest of the TSO BDs.
9450         (don't forget to mark the last one as last,
9451         and to unmap only AFTER you write to the BD ...)
9452         And above all, all pbd sizes are in words - NOT DWORDS!
9453         */
9454
9455         pkt_prod = fp->tx_pkt_prod++;
9456         bd_prod = TX_BD(fp->tx_bd_prod);
9457
9458         /* get a tx_buf and first BD */
9459         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9460         tx_bd = &fp->tx_desc_ring[bd_prod];
9461
9462         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9463         tx_bd->general_data = (UNICAST_ADDRESS <<
9464                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9465         tx_bd->general_data |= 1; /* header nbd */
9466
9467         /* remember the first BD of the packet */
9468         tx_buf->first_bd = fp->tx_bd_prod;
9469         tx_buf->skb = skb;
9470
9471         DP(NETIF_MSG_TX_QUEUED,
9472            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9473            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9474
9475         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9476                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9477                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9478                 vlan_off += 4;
9479         } else
9480                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9481
9482         if (xmit_type) {
9483
9484                 /* turn on parsing and get a BD */
9485                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9486                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9487
9488                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9489         }
9490
9491         if (xmit_type & XMIT_CSUM) {
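                     /* note: lengths in the parsing BD are in 16-bit
                      * words, hence all the divisions by 2
                      */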
9492                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9493
9494                 /* for now NS flag is not used in Linux */
9495                 pbd->global_data = (hlen |
9496                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9497                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9498
9499                 pbd->ip_hlen = (skb_transport_header(skb) -
9500                                 skb_network_header(skb)) / 2;
9501
9502                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9503
9504                 pbd->total_hlen = cpu_to_le16(hlen);
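                     /* convert back to bytes from skb->data for the
                      * headlen checks below
                      */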
9505                 hlen = hlen*2 - vlan_off;
9506
9507                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9508
9509                 if (xmit_type & XMIT_CSUM_V4)
9510                         tx_bd->bd_flags.as_bitfield |=
9511                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9512                 else
9513                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9514
9515                 if (xmit_type & XMIT_CSUM_TCP) {
9516                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9517
9518                 } else {
9519                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9520
9521                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9522                         pbd->cs_offset = fix / 2;
9523
9524                         DP(NETIF_MSG_TX_QUEUED,
9525                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9526                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9527                            SKB_CS(skb));
9528
9529                         /* HW bug: fixup the CSUM */
9530                         pbd->tcp_pseudo_csum =
9531                                 bnx2x_csum_fix(skb_transport_header(skb),
9532                                                SKB_CS(skb), fix);
9533
9534                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9535                            pbd->tcp_pseudo_csum);
9536                 }
9537         }
9538
9539         mapping = pci_map_single(bp->pdev, skb->data,
9540                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9541
9542         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9543         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
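             /* BD accounting: one start BD for the linear data, one BD
              * per frag, plus the parsing BD when one was used
              */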
9544         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9545         tx_bd->nbd = cpu_to_le16(nbd);
9546         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9547
9548         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9549            "  nbytes %d  flags %x  vlan %x\n",
9550            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9551            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9552            le16_to_cpu(tx_bd->vlan));
9553
9554         if (xmit_type & XMIT_GSO) {
9555
9556                 DP(NETIF_MSG_TX_QUEUED,
9557                    "TSO packet len %d  hlen %d  headlen %d  tso size %d\n",
9558                    skb->len, hlen, skb_headlen(skb),
9559                    skb_shinfo(skb)->gso_size);
9560
9561                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9562
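                     /* for LSO the first BD apparently may carry only
                      * headers (a FW restriction): split off any payload
                      * from the linear area into an extra BD
                      */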
9563                 if (unlikely(skb_headlen(skb) > hlen))
9564                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9565                                                  bd_prod, ++nbd);
9566
9567                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9568                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9569                 pbd->tcp_flags = pbd_tcp_flags(skb);
9570
9571                 if (xmit_type & XMIT_GSO_V4) {
9572                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9573                         pbd->tcp_pseudo_csum =
9574                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9575                                                           ip_hdr(skb)->daddr,
9576                                                           0, IPPROTO_TCP, 0));
9577
9578                 } else
9579                         pbd->tcp_pseudo_csum =
9580                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9581                                                         &ipv6_hdr(skb)->daddr,
9582                                                         0, IPPROTO_TCP, 0));
9583
9584                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9585         }
9586
9587         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9588                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9589
9590                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9591                 tx_bd = &fp->tx_desc_ring[bd_prod];
9592
9593                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9594                                        frag->size, PCI_DMA_TODEVICE);
9595
9596                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9597                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9598                 tx_bd->nbytes = cpu_to_le16(frag->size);
9599                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9600                 tx_bd->bd_flags.as_bitfield = 0;
9601
9602                 DP(NETIF_MSG_TX_QUEUED,
9603                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9604                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9605                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9606         }
9607
9608         /* now at last mark the BD as the last BD */
9609         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9610
9611         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9612            tx_bd, tx_bd->bd_flags.as_bitfield);
9613
9614         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9615
9616         /* now send a tx doorbell, counting the next-page pointer BD
9617          * if the packet contains or ends with it
9618          */
9619         if (TX_BD_POFF(bd_prod) < nbd)
9620                 nbd++;
9621
9622         if (pbd)
9623                 DP(NETIF_MSG_TX_QUEUED,
9624                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9625                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9626                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9627                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9628                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9629
9630         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9631
9632         fp->hw_tx_prods->bds_prod =
9633                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9634         mb(); /* FW restriction: must not reorder writing nbd and packets */
9635         fp->hw_tx_prods->packets_prod =
9636                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9637         DOORBELL(bp, FP_IDX(fp), 0);
9638
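             /* keep the doorbell write ordered before the tx lock is
              * released, so doorbells from different CPUs are not
              * reordered
              */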
9639         mmiowb();
9640
9641         fp->tx_bd_prod += nbd;
9642         dev->trans_start = jiffies;
9643
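             /* stop first, then re-check: a concurrent bnx2x_tx_int()
              * may have freed BDs in the meantime, and it only wakes a
              * queue it sees stopped
              */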
9644         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9645                 netif_stop_queue(dev);
9646                 bp->eth_stats.driver_xoff++;
9647                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9648                         netif_wake_queue(dev);
9649         }
9650         fp->tx_pkt++;
9651
9652         return NETDEV_TX_OK;
9653 }
9654
9655 /* called with rtnl_lock */
9656 static int bnx2x_open(struct net_device *dev)
9657 {
9658         struct bnx2x *bp = netdev_priv(dev);
9659
9660         bnx2x_set_power_state(bp, PCI_D0);
9661
9662         return bnx2x_nic_load(bp, LOAD_OPEN);
9663 }
9664
9665 /* called with rtnl_lock */
9666 static int bnx2x_close(struct net_device *dev)
9667 {
9668         struct bnx2x *bp = netdev_priv(dev);
9669
9670         /* Unload the driver, release IRQs */
9671         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9672         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9673                 if (!CHIP_REV_IS_SLOW(bp))
9674                         bnx2x_set_power_state(bp, PCI_D3hot);
9675
9676         return 0;
9677 }
9678
9679 /* called with netif_tx_lock from set_multicast */
9680 static void bnx2x_set_rx_mode(struct net_device *dev)
9681 {
9682         struct bnx2x *bp = netdev_priv(dev);
9683         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9684         int port = BP_PORT(bp);
9685
9686         if (bp->state != BNX2X_STATE_OPEN) {
9687                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9688                 return;
9689         }
9690
9691         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9692
9693         if (dev->flags & IFF_PROMISC)
9694                 rx_mode = BNX2X_RX_MODE_PROMISC;
9695
9696         else if ((dev->flags & IFF_ALLMULTI) ||
9697                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9698                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9699
9700         else { /* some multicasts */
9701                 if (CHIP_IS_E1(bp)) {
9702                         int i, old, offset;
9703                         struct dev_mc_list *mclist;
9704                         struct mac_configuration_cmd *config =
9705                                                 bnx2x_sp(bp, mcast_config);
9706
9707                         for (i = 0, mclist = dev->mc_list;
9708                              mclist && (i < dev->mc_count);
9709                              i++, mclist = mclist->next) {
9710
9711                                 config->config_table[i].
9712                                         cam_entry.msb_mac_addr =
9713                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9714                                 config->config_table[i].
9715                                         cam_entry.middle_mac_addr =
9716                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9717                                 config->config_table[i].
9718                                         cam_entry.lsb_mac_addr =
9719                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9720                                 config->config_table[i].cam_entry.flags =
9721                                                         cpu_to_le16(port);
9722                                 config->config_table[i].
9723                                         target_table_entry.flags = 0;
9724                                 config->config_table[i].
9725                                         target_table_entry.client_id = 0;
9726                                 config->config_table[i].
9727                                         target_table_entry.vlan_id = 0;
9728
9729                                 DP(NETIF_MSG_IFUP,
9730                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9731                                    config->config_table[i].
9732                                                 cam_entry.msb_mac_addr,
9733                                    config->config_table[i].
9734                                                 cam_entry.middle_mac_addr,
9735                                    config->config_table[i].
9736                                                 cam_entry.lsb_mac_addr);
9737                         }
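                             /* if the previous list was longer, invalidate
                              * the leftover CAM entries
                              */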
9738                         old = config->hdr.length_6b;
9739                         if (old > i) {
9740                                 for (; i < old; i++) {
9741                                         if (CAM_IS_INVALID(config->
9742                                                            config_table[i])) {
9743                                                 i--; /* already invalidated */
9744                                                 break;
9745                                         }
9746                                         /* invalidate */
9747                                         CAM_INVALIDATE(config->
9748                                                        config_table[i]);
9749                                 }
9750                         }
9751
9752                         if (CHIP_REV_IS_SLOW(bp))
9753                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9754                         else
9755                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9756
9757                         config->hdr.length_6b = i;
9758                         config->hdr.offset = offset;
9759                         config->hdr.client_id = BP_CL_ID(bp);
9760                         config->hdr.reserved1 = 0;
9761
9762                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9763                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9764                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9765                                       0);
9766                 } else { /* E1H */
9767                         /* Accept one or more multicasts */
9768                         struct dev_mc_list *mclist;
9769                         u32 mc_filter[MC_HASH_SIZE];
9770                         u32 crc, bit, regidx;
9771                         int i;
9772
9773                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9774
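                             /* E1H filters multicasts with a 256-bit hash:
                              * bits 31:24 of the CRC32c of the MAC select
                              * one bit across the MC_HASH_SIZE registers
                              */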
9775                         for (i = 0, mclist = dev->mc_list;
9776                              mclist && (i < dev->mc_count);
9777                              i++, mclist = mclist->next) {
9778
9779                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9780                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9781                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9782                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9783                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9784
9785                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9786                                 bit = (crc >> 24) & 0xff;
9787                                 regidx = bit >> 5;
9788                                 bit &= 0x1f;
9789                                 mc_filter[regidx] |= (1 << bit);
9790                         }
9791
9792                         for (i = 0; i < MC_HASH_SIZE; i++)
9793                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9794                                        mc_filter[i]);
9795                 }
9796         }
9797
9798         bp->rx_mode = rx_mode;
9799         bnx2x_set_storm_rx_mode(bp);
9800 }
9801
9802 /* called with rtnl_lock */
9803 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9804 {
9805         struct sockaddr *addr = p;
9806         struct bnx2x *bp = netdev_priv(dev);
9807
9808         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9809                 return -EINVAL;
9810
9811         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9812         if (netif_running(dev)) {
9813                 if (CHIP_IS_E1(bp))
9814                         bnx2x_set_mac_addr_e1(bp);
9815                 else
9816                         bnx2x_set_mac_addr_e1h(bp);
9817         }
9818
9819         return 0;
9820 }
9821
9822 /* called with rtnl_lock */
9823 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9824 {
9825         struct mii_ioctl_data *data = if_mii(ifr);
9826         struct bnx2x *bp = netdev_priv(dev);
9827         int err;
9828
9829         switch (cmd) {
9830         case SIOCGMIIPHY:
9831                 data->phy_id = bp->port.phy_addr;
9832
9833                 /* fallthrough */
9834
9835         case SIOCGMIIREG: {
9836                 u16 mii_regval;
9837
9838                 if (!netif_running(dev))
9839                         return -EAGAIN;
9840
9841                 mutex_lock(&bp->port.phy_mutex);
9842                 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9843                                       DEFAULT_PHY_DEV_ADDR,
9844                                       (data->reg_num & 0x1f), &mii_regval);
9845                 data->val_out = mii_regval;
9846                 mutex_unlock(&bp->port.phy_mutex);
9847                 return err;
9848         }
9849
9850         case SIOCSMIIREG:
9851                 if (!capable(CAP_NET_ADMIN))
9852                         return -EPERM;
9853
9854                 if (!netif_running(dev))
9855                         return -EAGAIN;
9856
9857                 mutex_lock(&bp->port.phy_mutex);
9858                 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9859                                        DEFAULT_PHY_DEV_ADDR,
9860                                        (data->reg_num & 0x1f), data->val_in);
9861                 mutex_unlock(&bp->port.phy_mutex);
9862                 return err;
9863
9864         default:
9865                 /* do nothing */
9866                 break;
9867         }
9868
9869         return -EOPNOTSUPP;
9870 }
9871
9872 /* called with rtnl_lock */
9873 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9874 {
9875         struct bnx2x *bp = netdev_priv(dev);
9876         int rc = 0;
9877
9878         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9879             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9880                 return -EINVAL;
9881
9882         /* This does not race with packet allocation
9883          * because the actual alloc size is
9884          * only updated as part of load
9885          */
9886         dev->mtu = new_mtu;
9887
9888         if (netif_running(dev)) {
9889                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9890                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9891         }
9892
9893         return rc;
9894 }
9895
9896 static void bnx2x_tx_timeout(struct net_device *dev)
9897 {
9898         struct bnx2x *bp = netdev_priv(dev);
9899
9900 #ifdef BNX2X_STOP_ON_ERROR
9901         if (!bp->panic)
9902                 bnx2x_panic();
9903 #endif
9904         /* This allows the netif to be shut down gracefully before resetting */
9905         schedule_work(&bp->reset_task);
9906 }
9907
9908 #ifdef BCM_VLAN
9909 /* called with rtnl_lock */
9910 static void bnx2x_vlan_rx_register(struct net_device *dev,
9911                                    struct vlan_group *vlgrp)
9912 {
9913         struct bnx2x *bp = netdev_priv(dev);
9914
9915         bp->vlgrp = vlgrp;
9916         if (netif_running(dev))
9917                 bnx2x_set_client_config(bp);
9918 }
9919
9920 #endif
9921
9922 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
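/* netpoll: invoke the interrupt handler by hand with the IRQ disabled */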
9923 static void poll_bnx2x(struct net_device *dev)
9924 {
9925         struct bnx2x *bp = netdev_priv(dev);
9926
9927         disable_irq(bp->pdev->irq);
9928         bnx2x_interrupt(bp->pdev->irq, dev);
9929         enable_irq(bp->pdev->irq);
9930 }
9931 #endif
9932
9933 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9934                                     struct net_device *dev)
9935 {
9936         struct bnx2x *bp;
9937         int rc;
9938
9939         SET_NETDEV_DEV(dev, &pdev->dev);
9940         bp = netdev_priv(dev);
9941
9942         bp->dev = dev;
9943         bp->pdev = pdev;
9944         bp->flags = 0;
9945         bp->func = PCI_FUNC(pdev->devfn);
9946
9947         rc = pci_enable_device(pdev);
9948         if (rc) {
9949                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9950                 goto err_out;
9951         }
9952
9953         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9954                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9955                        " aborting\n");
9956                 rc = -ENODEV;
9957                 goto err_out_disable;
9958         }
9959
9960         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9961                 printk(KERN_ERR PFX "Cannot find second PCI device"
9962                        " base address, aborting\n");
9963                 rc = -ENODEV;
9964                 goto err_out_disable;
9965         }
9966
9967         if (atomic_read(&pdev->enable_cnt) == 1) {
9968                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9969                 if (rc) {
9970                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9971                                " aborting\n");
9972                         goto err_out_disable;
9973                 }
9974
9975                 pci_set_master(pdev);
9976                 pci_save_state(pdev);
9977         }
9978
9979         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9980         if (bp->pm_cap == 0) {
9981                 printk(KERN_ERR PFX "Cannot find power management"
9982                        " capability, aborting\n");
9983                 rc = -EIO;
9984                 goto err_out_release;
9985         }
9986
9987         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9988         if (bp->pcie_cap == 0) {
9989                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9990                        " aborting\n");
9991                 rc = -EIO;
9992                 goto err_out_release;
9993         }
9994
9995         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9996                 bp->flags |= USING_DAC_FLAG;
9997                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9998                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9999                                " failed, aborting\n");
10000                         rc = -EIO;
10001                         goto err_out_release;
10002                 }
10003
10004         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10005                 printk(KERN_ERR PFX "System does not support DMA,"
10006                        " aborting\n");
10007                 rc = -EIO;
10008                 goto err_out_release;
10009         }
10010
10011         dev->mem_start = pci_resource_start(pdev, 0);
10012         dev->base_addr = dev->mem_start;
10013         dev->mem_end = pci_resource_end(pdev, 0);
10014
10015         dev->irq = pdev->irq;
10016
10017         bp->regview = ioremap_nocache(dev->base_addr,
10018                                       pci_resource_len(pdev, 0));
10019         if (!bp->regview) {
10020                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10021                 rc = -ENOMEM;
10022                 goto err_out_release;
10023         }
10024
10025         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10026                                         min_t(u64, BNX2X_DB_SIZE,
10027                                               pci_resource_len(pdev, 2)));
10028         if (!bp->doorbells) {
10029                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10030                 rc = -ENOMEM;
10031                 goto err_out_unmap;
10032         }
10033
10034         bnx2x_set_power_state(bp, PCI_D0);
10035
10036         /* clean indirect addresses */
10037         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10038                                PCICFG_VENDOR_ID_OFFSET);
10039         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10040         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10041         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10042         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10043
10044         dev->hard_start_xmit = bnx2x_start_xmit;
10045         dev->watchdog_timeo = TX_TIMEOUT;
10046
10047         dev->ethtool_ops = &bnx2x_ethtool_ops;
10048         dev->open = bnx2x_open;
10049         dev->stop = bnx2x_close;
10050         dev->set_multicast_list = bnx2x_set_rx_mode;
10051         dev->set_mac_address = bnx2x_change_mac_addr;
10052         dev->do_ioctl = bnx2x_ioctl;
10053         dev->change_mtu = bnx2x_change_mtu;
10054         dev->tx_timeout = bnx2x_tx_timeout;
10055 #ifdef BCM_VLAN
10056         dev->vlan_rx_register = bnx2x_vlan_rx_register;
10057 #endif
10058 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10059         dev->poll_controller = poll_bnx2x;
10060 #endif
10061         dev->features |= NETIF_F_SG;
10062         dev->features |= NETIF_F_HW_CSUM;
10063         if (bp->flags & USING_DAC_FLAG)
10064                 dev->features |= NETIF_F_HIGHDMA;
10065 #ifdef BCM_VLAN
10066         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10067 #endif
10068         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10069         dev->features |= NETIF_F_TSO6;
10070
10071         return 0;
10072
10073 err_out_unmap:
10074         if (bp->regview) {
10075                 iounmap(bp->regview);
10076                 bp->regview = NULL;
10077         }
10078         if (bp->doorbells) {
10079                 iounmap(bp->doorbells);
10080                 bp->doorbells = NULL;
10081         }
10082
10083 err_out_release:
10084         if (atomic_read(&pdev->enable_cnt) == 1)
10085                 pci_release_regions(pdev);
10086
10087 err_out_disable:
10088         pci_disable_device(pdev);
10089         pci_set_drvdata(pdev, NULL);
10090
10091 err_out:
10092         return rc;
10093 }
10094
10095 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10096 {
10097         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10098
10099         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10100         return val;
10101 }
10102
10103 /* return value of 1=2.5GHz 2=5GHz */
10104 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10105 {
10106         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10107
10108         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10109         return val;
10110 }
10111
10112 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10113                                     const struct pci_device_id *ent)
10114 {
10115         static int version_printed;
10116         struct net_device *dev = NULL;
10117         struct bnx2x *bp;
10118         int rc;
10119         DECLARE_MAC_BUF(mac);
10120
10121         if (version_printed++ == 0)
10122                 printk(KERN_INFO "%s", version);
10123
10124         /* dev zeroed in alloc_etherdev */
10125         dev = alloc_etherdev(sizeof(*bp));
10126         if (!dev) {
10127                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10128                 return -ENOMEM;
10129         }
10130
10131         netif_carrier_off(dev);
10132
10133         bp = netdev_priv(dev);
10134         bp->msglevel = debug;
10135
10136         rc = bnx2x_init_dev(pdev, dev);
10137         if (rc < 0) {
10138                 free_netdev(dev);
10139                 return rc;
10140         }
10141
10142         rc = register_netdev(dev);
10143         if (rc) {
10144                 dev_err(&pdev->dev, "Cannot register net device\n");
10145                 goto init_one_exit;
10146         }
10147
10148         pci_set_drvdata(pdev, dev);
10149
10150         rc = bnx2x_init_bp(bp);
10151         if (rc) {
10152                 unregister_netdev(dev);
10153                 goto init_one_exit;
10154         }
10155
10156         bp->common.name = board_info[ent->driver_data].name;
10157         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10158                " IRQ %d, ", dev->name, bp->common.name,
10159                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10160                bnx2x_get_pcie_width(bp),
10161                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10162                dev->base_addr, bp->pdev->irq);
10163         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10164         return 0;
10165
10166 init_one_exit:
10167         if (bp->regview)
10168                 iounmap(bp->regview);
10169
10170         if (bp->doorbells)
10171                 iounmap(bp->doorbells);
10172
10173         free_netdev(dev);
10174
10175         if (atomic_read(&pdev->enable_cnt) == 1)
10176                 pci_release_regions(pdev);
10177
10178         pci_disable_device(pdev);
10179         pci_set_drvdata(pdev, NULL);
10180
10181         return rc;
10182 }
10183
10184 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10185 {
10186         struct net_device *dev = pci_get_drvdata(pdev);
10187         struct bnx2x *bp;
10188
10189         if (!dev) {
10190                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10191                 return;
10192         }
10193         bp = netdev_priv(dev);
10194
10195         unregister_netdev(dev);
10196
10197         if (bp->regview)
10198                 iounmap(bp->regview);
10199
10200         if (bp->doorbells)
10201                 iounmap(bp->doorbells);
10202
10203         free_netdev(dev);
10204
10205         if (atomic_read(&pdev->enable_cnt) == 1)
10206                 pci_release_regions(pdev);
10207
10208         pci_disable_device(pdev);
10209         pci_set_drvdata(pdev, NULL);
10210 }
10211
10212 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10213 {
10214         struct net_device *dev = pci_get_drvdata(pdev);
10215         struct bnx2x *bp;
10216
10217         if (!dev) {
10218                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10219                 return -ENODEV;
10220         }
10221         bp = netdev_priv(dev);
10222
10223         rtnl_lock();
10224
10225         pci_save_state(pdev);
10226
10227         if (!netif_running(dev)) {
10228                 rtnl_unlock();
10229                 return 0;
10230         }
10231
10232         netif_device_detach(dev);
10233
10234         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10235
10236         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10237
10238         rtnl_unlock();
10239
10240         return 0;
10241 }
10242
10243 static int bnx2x_resume(struct pci_dev *pdev)
10244 {
10245         struct net_device *dev = pci_get_drvdata(pdev);
10246         struct bnx2x *bp;
10247         int rc;
10248
10249         if (!dev) {
10250                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10251                 return -ENODEV;
10252         }
10253         bp = netdev_priv(dev);
10254
10255         rtnl_lock();
10256
10257         pci_restore_state(pdev);
10258
10259         if (!netif_running(dev)) {
10260                 rtnl_unlock();
10261                 return 0;
10262         }
10263
10264         bnx2x_set_power_state(bp, PCI_D0);
10265         netif_device_attach(dev);
10266
10267         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10268
10269         rtnl_unlock();
10270
10271         return rc;
10272 }
10273
10274 /**
10275  * bnx2x_io_error_detected - called when PCI error is detected
10276  * @pdev: Pointer to PCI device
10277  * @state: The current pci connection state
10278  *
10279  * This function is called after a PCI bus error affecting
10280  * this device has been detected.
10281  */
10282 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10283                                                 pci_channel_state_t state)
10284 {
10285         struct net_device *dev = pci_get_drvdata(pdev);
10286         struct bnx2x *bp = netdev_priv(dev);
10287
10288         rtnl_lock();
10289
10290         netif_device_detach(dev);
10291
10292         if (netif_running(dev))
10293                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10294
10295         pci_disable_device(pdev);
10296
10297         rtnl_unlock();
10298
10299         /* Request a slot reset */
10300         return PCI_ERS_RESULT_NEED_RESET;
10301 }
10302
10303 /**
10304  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10305  * @pdev: Pointer to PCI device
10306  *
10307  * Restart the card from scratch, as if from a cold-boot.
10308  */
10309 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10310 {
10311         struct net_device *dev = pci_get_drvdata(pdev);
10312         struct bnx2x *bp = netdev_priv(dev);
10313
10314         rtnl_lock();
10315
10316         if (pci_enable_device(pdev)) {
10317                 dev_err(&pdev->dev,
10318                         "Cannot re-enable PCI device after reset\n");
10319                 rtnl_unlock();
10320                 return PCI_ERS_RESULT_DISCONNECT;
10321         }
10322
10323         pci_set_master(pdev);
10324         pci_restore_state(pdev);
10325
10326         if (netif_running(dev))
10327                 bnx2x_set_power_state(bp, PCI_D0);
10328
10329         rtnl_unlock();
10330
10331         return PCI_ERS_RESULT_RECOVERED;
10332 }
10333
10334 /**
10335  * bnx2x_io_resume - called when traffic can start flowing again
10336  * @pdev: Pointer to PCI device
10337  *
10338  * This callback is called when the error recovery driver tells us that
10339  * it's OK to resume normal operation.
10340  */
10341 static void bnx2x_io_resume(struct pci_dev *pdev)
10342 {
10343         struct net_device *dev = pci_get_drvdata(pdev);
10344         struct bnx2x *bp = netdev_priv(dev);
10345
10346         rtnl_lock();
10347
10348         if (netif_running(dev))
10349                 bnx2x_nic_load(bp, LOAD_OPEN);
10350
10351         netif_device_attach(dev);
10352
10353         rtnl_unlock();
10354 }
10355
10356 static struct pci_error_handlers bnx2x_err_handler = {
10357         .error_detected = bnx2x_io_error_detected,
10358         .slot_reset = bnx2x_io_slot_reset,
10359         .resume = bnx2x_io_resume,
10360 };
10361
10362 static struct pci_driver bnx2x_pci_driver = {
10363         .name        = DRV_MODULE_NAME,
10364         .id_table    = bnx2x_pci_tbl,
10365         .probe       = bnx2x_init_one,
10366         .remove      = __devexit_p(bnx2x_remove_one),
10367         .suspend     = bnx2x_suspend,
10368         .resume      = bnx2x_resume,
10369         .err_handler = &bnx2x_err_handler,
10370 };
10371
10372 static int __init bnx2x_init(void)
10373 {
10374         return pci_register_driver(&bnx2x_pci_driver);
10375 }
10376
10377 static void __exit bnx2x_cleanup(void)
10378 {
10379         pci_unregister_driver(&bnx2x_pci_driver);
10380 }
10381
10382 module_init(bnx2x_init);
10383 module_exit(bnx2x_cleanup);
10384