/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.6"
#define DRV_MODULE_RELDATE      "2008/06/23"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
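
/* Note on the two helpers above: they tunnel GRC register accesses through
 * a window in PCI config space (PCICFG_GRC_ADDRESS selects the target
 * register, PCICFG_GRC_DATA carries the value), which works before the
 * BAR-mapped fast path is usable.  Restoring the window to
 * PCICFG_VENDOR_ID_OFFSET afterwards appears intended to keep stray config
 * cycles from landing on a live device register.
 */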

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
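
/* The DMAE block exposes 16 command cells; bnx2x_post_dmae() copies the
 * command image dword by dword into cell `idx` and then kicks the matching
 * GO register from dmae_reg_go_c[] to start the transfer.
 */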

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
        }

        mutex_unlock(&bp->dmae_mutex);
}
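
/* Completion is signalled by the DMAE engine DMA-ing DMAE_COMP_VAL into the
 * slowpath write-back word (wb_comp); the loop above busy-polls that word
 * for up to ~200 iterations (5 us each on real silicon, 100 ms on
 * emulation/FPGA) before declaring a timeout.
 */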

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}
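
/* bnx2x_read_dmae() mirrors bnx2x_write_dmae(): GRC is the source and the
 * slowpath wb_data scratch area is the PCI destination; the first four
 * dwords of wb_data are cleared up front and callers pick the result up
 * from bp->slowpath->wb_data[].
 */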

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
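
/* Each of the four storm processors (X/T/C/U) keeps an assert list in its
 * internal memory; bnx2x_mc_assert() walks all four lists, printing every
 * populated entry (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) and returning
 * the total number of asserts found.
 */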

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = 0;
                end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}
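
/* bnx2x_panic_dump() freezes statistics, prints per-queue producer and
 * consumer indices plus windows of the TX/RX descriptor, SGE and completion
 * rings around the current consumers, and finishes with the firmware trace
 * (bnx2x_fw_dump) and the storm assert lists (bnx2x_mc_assert).
 */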

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}
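
/* Teardown ordering matters here: bump intr_sem so ISRs become no-ops, mask
 * the HC so the hardware stops raising interrupts, synchronize_irq() each
 * vector (every fastpath vector plus the slowpath one under MSI-X) so no
 * handler is mid-flight, and only then cancel the slowpath work item.
 */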

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
           (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
        REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
        u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
           result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
        if (result == 0) {
                BNX2X_ERR("read %x from IGU\n", result);
                REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
        }
#endif
        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
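
/* A transmitted packet occupies `nbd` chained BDs: the first (mapped) BD,
 * optionally a parse BD and a TSO split-header BD (which carry no DMA
 * mapping of their own), then one mapped BD per page fragment.
 * bnx2x_free_tx_pkt() unmaps and walks them in that order and returns the
 * new BD consumer index.
 */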

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
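
/* tx_avail works on free-running 16-bit indices: SUB_S16() yields the
 * signed distance between prod and cons even across wrap, and the
 * NUM_TX_RINGS "next-page" BDs are counted as permanently used so the
 * availability estimate stays conservative.
 */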

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}
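
/* Re-posting a buffer this way only syncs the first RX_COPY_THRESH bytes
 * back to the device and copies the existing DMA mapping from the consumer
 * slot to the producer slot, so no new allocation or mapping (and hence no
 * dma_mapping_error() check) is needed.
 */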

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}
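
/* TPA aggregation per bin: at TPA_START the just-received (partial) skb is
 * parked in tpa_pool[queue] while it stays DMA-mapped, and the spare skb
 * previously held in the pool is mapped into the producer slot, so the BD
 * ring stays fully populated while the aggregation is open.
 */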

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}
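
/* The three RX producers (BD, CQE, SGE) are handed to the TSTORM as a
 * single tstorm_eth_rx_producers image, written dword by dword into its
 * internal memory at TSTORM_RX_PRODS_OFFSET() for this port/client.
 */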
1373
1374 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1375 {
1376         struct bnx2x *bp = fp->bp;
1377         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1378         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1379         int rx_pkt = 0;
1380         u16 queue;
1381
1382 #ifdef BNX2X_STOP_ON_ERROR
1383         if (unlikely(bp->panic))
1384                 return 0;
1385 #endif
1386
1387         /* CQ "next element" is of the size of the regular element,
1388            that's why it's ok here */
1389         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1390         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1391                 hw_comp_cons++;
1392
1393         bd_cons = fp->rx_bd_cons;
1394         bd_prod = fp->rx_bd_prod;
1395         bd_prod_fw = bd_prod;
1396         sw_comp_cons = fp->rx_comp_cons;
1397         sw_comp_prod = fp->rx_comp_prod;
1398
1399         /* Memory barrier necessary as speculative reads of the rx
1400          * buffer can be ahead of the index in the status block
1401          */
1402         rmb();
1403
1404         DP(NETIF_MSG_RX_STATUS,
1405            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1406            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1407
1408         while (sw_comp_cons != hw_comp_cons) {
1409                 struct sw_rx_bd *rx_buf = NULL;
1410                 struct sk_buff *skb;
1411                 union eth_rx_cqe *cqe;
1412                 u8 cqe_fp_flags;
1413                 u16 len, pad;
1414
1415                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1416                 bd_prod = RX_BD(bd_prod);
1417                 bd_cons = RX_BD(bd_cons);
1418
1419                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1420                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1421
1422                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1423                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1424                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1425                    cqe->fast_path_cqe.rss_hash_result,
1426                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1427                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1428
1429                 /* is this a slowpath msg? */
1430                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1431                         bnx2x_sp_event(fp, cqe);
1432                         goto next_cqe;
1433
1434                 /* this is an rx packet */
1435                 } else {
1436                         rx_buf = &fp->rx_buf_ring[bd_cons];
1437                         skb = rx_buf->skb;
1438                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1439                         pad = cqe->fast_path_cqe.placement_offset;
1440
1441                         /* If CQE is marked both TPA_START and TPA_END
1442                            it is a non-TPA CQE */
1443                         if ((!fp->disable_tpa) &&
1444                             (TPA_TYPE(cqe_fp_flags) !=
1445                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1446                                 queue = cqe->fast_path_cqe.queue_index;
1447
1448                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1449                                         DP(NETIF_MSG_RX_STATUS,
1450                                            "calling tpa_start on queue %d\n",
1451                                            queue);
1452
1453                                         bnx2x_tpa_start(fp, queue, skb,
1454                                                         bd_cons, bd_prod);
1455                                         goto next_rx;
1456                                 }
1457
1458                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1459                                         DP(NETIF_MSG_RX_STATUS,
1460                                            "calling tpa_stop on queue %d\n",
1461                                            queue);
1462
1463                                         if (!BNX2X_RX_SUM_FIX(cqe))
1464                                                 BNX2X_ERR("STOP on non-TCP "
1465                                                           "data\n");
1466
1467                                         /* This is the size of the linear data
1468                                            on this skb */
1469                                         len = le16_to_cpu(cqe->fast_path_cqe.
1470                                                                 len_on_bd);
1471                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1472                                                     len, cqe, comp_ring_cons);
1473 #ifdef BNX2X_STOP_ON_ERROR
1474                                         if (bp->panic)
1475                                                 return -EINVAL;
1476 #endif
1477
1478                                         bnx2x_update_sge_prod(fp,
1479                                                         &cqe->fast_path_cqe);
1480                                         goto next_cqe;
1481                                 }
1482                         }
1483
1484                         pci_dma_sync_single_for_device(bp->pdev,
1485                                                        pci_unmap_addr(rx_buf, mapping),
1486                                                        pad + RX_COPY_THRESH,
1487                                                        PCI_DMA_FROMDEVICE);
1488                         prefetch(skb);
1489                         prefetch(((char *)(skb)) + 128);
1490
1491                         /* is this an error packet? */
1492                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1493                                 DP(NETIF_MSG_RX_ERR,
1494                                    "ERROR  flags %x  rx packet %u\n",
1495                                    cqe_fp_flags, sw_comp_cons);
1496                                 bp->eth_stats.rx_err_discard_pkt++;
1497                                 goto reuse_rx;
1498                         }
1499
1500                         /* Since we don't have a jumbo ring,
1501                          * copy small packets if mtu > 1500
1502                          */
1503                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1504                             (len <= RX_COPY_THRESH)) {
1505                                 struct sk_buff *new_skb;
1506
1507                                 new_skb = netdev_alloc_skb(bp->dev,
1508                                                            len + pad);
1509                                 if (new_skb == NULL) {
1510                                         DP(NETIF_MSG_RX_ERR,
1511                                            "ERROR  packet dropped "
1512                                            "because of alloc failure\n");
1513                                         bp->eth_stats.rx_skb_alloc_failed++;
1514                                         goto reuse_rx;
1515                                 }
1516
1517                                 /* aligned copy */
1518                                 skb_copy_from_linear_data_offset(skb, pad,
1519                                                     new_skb->data + pad, len);
1520                                 skb_reserve(new_skb, pad);
1521                                 skb_put(new_skb, len);
1522
1523                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1524
1525                                 skb = new_skb;
1526
1527                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1528                                 pci_unmap_single(bp->pdev,
1529                                                  pci_unmap_addr(rx_buf, mapping),
1530                                                  bp->rx_buf_use_size,
1531                                                  PCI_DMA_FROMDEVICE);
1532                                 skb_reserve(skb, pad);
1533                                 skb_put(skb, len);
1534
1535                         } else {
1536                                 DP(NETIF_MSG_RX_ERR,
1537                                    "ERROR  packet dropped because "
1538                                    "of alloc failure\n");
1539                                 bp->eth_stats.rx_skb_alloc_failed++;
1540 reuse_rx:
1541                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1542                                 goto next_rx;
1543                         }
1544
1545                         skb->protocol = eth_type_trans(skb, bp->dev);
1546
1547                         skb->ip_summed = CHECKSUM_NONE;
1548                         if (bp->rx_csum) {
1549                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1550                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1551                                 else
1552                                         bp->eth_stats.hw_csum_err++;
1553                         }
1554                 }
1555
1556 #ifdef BCM_VLAN
1557                 if ((bp->vlgrp != NULL) &&
1558                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1559                      PARSING_FLAGS_VLAN))
1560                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1561                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1562                 else
1563 #endif
1564                         netif_receive_skb(skb);
1565
1566                 bp->dev->last_rx = jiffies;
1567
1568 next_rx:
1569                 rx_buf->skb = NULL;
1570
1571                 bd_cons = NEXT_RX_IDX(bd_cons);
1572                 bd_prod = NEXT_RX_IDX(bd_prod);
1573                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1574                 rx_pkt++;
1575 next_cqe:
1576                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1577                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1578
1579                 if (rx_pkt == budget)
1580                         break;
1581         } /* while */
1582
1583         fp->rx_bd_cons = bd_cons;
1584         fp->rx_bd_prod = bd_prod_fw;
1585         fp->rx_comp_cons = sw_comp_cons;
1586         fp->rx_comp_prod = sw_comp_prod;
1587
1588         /* Update producers */
1589         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1590                              fp->rx_sge_prod);
1591         mmiowb(); /* keep prod updates ordered */
1592
1593         fp->rx_pkt += rx_pkt;
1594         fp->rx_calls++;
1595
1596         return rx_pkt;
1597 }
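
/* Usage sketch (illustrative; the poll function name is hypothetical, but
 * the pattern follows the NAPI scheduling done by the handlers below):
 *
 *      static int bnx2x_poll_sketch(struct napi_struct *napi, int budget)
 *      {
 *              struct bnx2x_fastpath *fp =
 *                      container_of(napi, struct bnx2x_fastpath, napi);
 *              int work_done = bnx2x_rx_int(fp, budget);
 *
 *              if (work_done < budget) {
 *                      netif_rx_complete(fp->bp->dev, napi);
 *                      // re-enable the status block interrupt here
 *              }
 *              return work_done;
 *      }
 */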
1598
1599 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1600 {
1601         struct bnx2x_fastpath *fp = fp_cookie;
1602         struct bnx2x *bp = fp->bp;
1603         struct net_device *dev = bp->dev;
1604         int index = FP_IDX(fp);
1605
1606         /* Return here if interrupt is disabled */
1607         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1608                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1609                 return IRQ_HANDLED;
1610         }
1611
1612         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1613            index, FP_SB_ID(fp));
1614         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1615
1616 #ifdef BNX2X_STOP_ON_ERROR
1617         if (unlikely(bp->panic))
1618                 return IRQ_HANDLED;
1619 #endif
1620
1621         prefetch(fp->rx_cons_sb);
1622         prefetch(fp->tx_cons_sb);
1623         prefetch(&fp->status_blk->c_status_block.status_block_index);
1624         prefetch(&fp->status_blk->u_status_block.status_block_index);
1625
1626         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1627
1628         return IRQ_HANDLED;
1629 }
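
/* Illustrative only (the vector-table layout, with slot 0 carrying the
 * slowpath interrupt, is an assumption): each fastpath attaches this
 * handler to its own MSI-X vector roughly as
 *
 *      request_irq(bp->msix_table[i + 1].vector, bnx2x_msix_fp_int,
 *                  0, bp->dev->name, &bp->fp[i]);
 */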
1630
1631 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1632 {
1633         struct net_device *dev = dev_instance;
1634         struct bnx2x *bp = netdev_priv(dev);
1635         u16 status = bnx2x_ack_int(bp);
1636         u16 mask;
1637
1638         /* Return here if interrupt is shared and it's not for us */
1639         if (unlikely(status == 0)) {
1640                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1641                 return IRQ_NONE;
1642         }
1643         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1644
1645 #ifdef BNX2X_STOP_ON_ERROR
1646         if (unlikely(bp->panic))
1647                 return IRQ_HANDLED;
1648 #endif
1649
1650         /* Return here if interrupt is disabled */
1651         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1652                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1653                 return IRQ_HANDLED;
1654         }
1655
1656         mask = 0x2 << bp->fp[0].sb_id;
1657         if (status & mask) {
1658                 struct bnx2x_fastpath *fp = &bp->fp[0];
1659
1660                 prefetch(fp->rx_cons_sb);
1661                 prefetch(fp->tx_cons_sb);
1662                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1663                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1664
1665                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1666
1667                 status &= ~mask;
1668         }
1669
1670
1671         if (unlikely(status & 0x1)) {
1672                 schedule_work(&bp->sp_task);
1673
1674                 status &= ~0x1;
1675                 if (!status)
1676                         return IRQ_HANDLED;
1677         }
1678
1679         if (status)
1680                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1681                    status);
1682
1683         return IRQ_HANDLED;
1684 }
1685
1686 /* end of fast path */
1687
1688 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1689
1690 /* Link */
1691
1692 /*
1693  * General service functions
1694  */
1695
1696 static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1697 {
1698         u32 lock_status;
1699         u32 resource_bit = (1 << resource);
1700         u8 port = BP_PORT(bp);
1701         int cnt;
1702
1703         /* Validating that the resource is within range */
1704         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1705                 DP(NETIF_MSG_HW,
1706                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1707                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1708                 return -EINVAL;
1709         }
1710
1711         /* Validating that the resource is not already taken */
1712         lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1713         if (lock_status & resource_bit) {
1714                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1715                    lock_status, resource_bit);
1716                 return -EEXIST;
1717         }
1718
1719         /* Try for 1 second every 5ms */
1720         for (cnt = 0; cnt < 200; cnt++) {
1721                 /* Try to acquire the lock */
1722                 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
1723                        resource_bit);
1724                 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
1733
1734 static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         u8 port = BP_PORT(bp);
1739
1740         /* Validating that the resource is within range */
1741         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1742                 DP(NETIF_MSG_HW,
1743                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1744                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1745                 return -EINVAL;
1746         }
1747
1748         /* Validating that the resource is currently taken */
1749         lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1750         if (!(lock_status & resource_bit)) {
1751                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1752                    lock_status, resource_bit);
1753                 return -EFAULT;
1754         }
1755
1756         REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
1757         return 0;
1758 }
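
/* Usage sketch (illustrative): callers bracket access to a shared resource
 * with a matching lock/unlock pair, as bnx2x_set_gpio() below does for the
 * GPIO resource, and should cope with the one-second acquisition timeout:
 *
 *      if (bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
 *              return;         // -EAGAIN: not acquired within 1 second
 *      ... touch the shared hardware ...
 *      bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
 */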
1759
1760 /* HW Lock for shared dual port PHYs */
1761 static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1762 {
1763         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1764
1765         mutex_lock(&bp->port.phy_mutex);
1766
1767         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1768             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1769                 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1770 }
1771
1772 static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
1773 {
1774         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1775
1776         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1777             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1778                 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1779
1780         mutex_unlock(&bp->port.phy_mutex);
1781 }
1782
1783 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1784 {
1785         /* The GPIO should be swapped if swap register is set and active */
1786         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1787                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
1788         int gpio_shift = gpio_num +
1789                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1790         u32 gpio_mask = (1 << gpio_shift);
1791         u32 gpio_reg;
1792
1793         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1794                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1795                 return -EINVAL;
1796         }
1797
1798         bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1799         /* read GPIO and mask all but the float bits */
1800         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1801
1802         switch (mode) {
1803         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1804                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1805                    gpio_num, gpio_shift);
1806                 /* clear FLOAT and set CLR */
1807                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1808                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1809                 break;
1810
1811         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set SET */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1821                    gpio_num, gpio_shift);
1822                 /* set FLOAT */
1823                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 break;
1825
1826         default:
1827                 break;
1828         }
1829
1830         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1831         bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1832
1833         return 0;
1834 }
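
/* Example call (this exact pairing appears in the fan-failure handling
 * further down): drive GPIO 1 low on the current port, e.g. to hold the
 * external PHY in reset:
 *
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_OUTPUT_LOW);
 */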
1835
1836 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1837 {
1838         u32 spio_mask = (1 << spio_num);
1839         u32 spio_reg;
1840
1841         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1842             (spio_num > MISC_REGISTERS_SPIO_7)) {
1843                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1844                 return -EINVAL;
1845         }
1846
1847         bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1848         /* read SPIO and mask all but the float bits */
1849         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1850
1851         switch (mode) {
1852         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1853                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1854                 /* clear FLOAT and set CLR */
1855                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1856                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1857                 break;
1858
1859         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1860                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1861                 /* clear FLOAT and set SET */
1862                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1863                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1864                 break;
1865
1866         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1867                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1868                 /* set FLOAT */
1869                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1870                 break;
1871
1872         default:
1873                 break;
1874         }
1875
1876         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1877         bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1878
1879         return 0;
1880 }
1881
1882 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1883 {
1884         switch (bp->link_vars.ieee_fc) {
1885         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1886                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1887                                           ADVERTISED_Pause);
1888                 break;
1889         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1890                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1891                                          ADVERTISED_Pause);
1892                 break;
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1894                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1895                 break;
1896         default:
1897                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1898                                           ADVERTISED_Pause);
1899                 break;
1900         }
1901 }
1902
1903 static void bnx2x_link_report(struct bnx2x *bp)
1904 {
1905         if (bp->link_vars.link_up) {
1906                 if (bp->state == BNX2X_STATE_OPEN)
1907                         netif_carrier_on(bp->dev);
1908                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1909
1910                 printk("%d Mbps ", bp->link_vars.line_speed);
1911
1912                 if (bp->link_vars.duplex == DUPLEX_FULL)
1913                         printk("full duplex");
1914                 else
1915                         printk("half duplex");
1916
1917                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1918                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1919                                 printk(", receive ");
1920                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1921                                         printk("& transmit ");
1922                         } else {
1923                                 printk(", transmit ");
1924                         }
1925                         printk("flow control ON");
1926                 }
1927                 printk("\n");
1928
1929         } else { /* link_down */
1930                 netif_carrier_off(bp->dev);
1931                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1932         }
1933 }
1934
1935 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1936 {
1937         if (!BP_NOMCP(bp)) {
1938                 u8 rc;
1939
1940                 /* Initialize link parameters structure variables */
1941                 bp->link_params.mtu = bp->dev->mtu;
1942
1943                 bnx2x_phy_hw_lock(bp);
1944                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1945                 bnx2x_phy_hw_unlock(bp);
1946
1947                 if (bp->link_vars.link_up)
1948                         bnx2x_link_report(bp);
1949
1950                 bnx2x_calc_fc_adv(bp);
1951
1952                 return rc;
1953         }
1954         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1955         return -EINVAL;
1956 }
1957
1958 static void bnx2x_link_set(struct bnx2x *bp)
1959 {
1960         if (!BP_NOMCP(bp)) {
1961                 bnx2x_phy_hw_lock(bp);
1962                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1963                 bnx2x_phy_hw_unlock(bp);
1964
1965                 bnx2x_calc_fc_adv(bp);
1966         } else
1967                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1968 }
1969
1970 static void bnx2x__link_reset(struct bnx2x *bp)
1971 {
1972         if (!BP_NOMCP(bp)) {
1973                 bnx2x_phy_hw_lock(bp);
1974                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1975                 bnx2x_phy_hw_unlock(bp);
1976         } else
1977                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1978 }
1979
1980 static u8 bnx2x_link_test(struct bnx2x *bp)
1981 {
1982         u8 rc;
1983
1984         bnx2x_phy_hw_lock(bp);
1985         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1986         bnx2x_phy_hw_unlock(bp);
1987
1988         return rc;
1989 }
1990
1991 /* Calculates the sum of vn_min_rates.
1992    It's needed for further normalizing of the min_rates.
1993
1994    Returns:
1995      sum of vn_min_rates
1996        or
1997      0 - if all the min_rates are 0.
1998      In the latter case the fairness algorithm should be deactivated.
1999      If not all min_rates are zero then those that are zero will
2000      be set to 1.
2001  */
2002 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2003 {
2004         int i, port = BP_PORT(bp);
2005         u32 wsum = 0;
2006         int all_zero = 1;
2007
2008         for (i = 0; i < E1HVN_MAX; i++) {
2009                 u32 vn_cfg =
2010                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2011                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2012                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2013                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2014                         /* If min rate is zero - set it to 1 */
2015                         if (!vn_min_rate)
2016                                 vn_min_rate = DEF_MIN_RATE;
2017                         else
2018                                 all_zero = 0;
2019
2020                         wsum += vn_min_rate;
2021                 }
2022         }
2023
2024         /* ... only if all min rates are zeros - disable FAIRNESS */
2025         if (all_zero)
2026                 return 0;
2027
2028         return wsum;
2029 }
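
/* Worked example (illustrative): with three visible VNs whose MIN_BW
 * fields are 0, 25 and 75, the scaled min rates are 0, 2500 and 7500;
 * the zero entry is bumped to DEF_MIN_RATE, all_zero stays 0, and the
 * function returns DEF_MIN_RATE + 2500 + 7500.  Only when every visible
 * VN reports 0 does it return 0, deactivating fairness.
 */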
2030
2031 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2032                                    int en_fness,
2033                                    u16 port_rate,
2034                                    struct cmng_struct_per_port *m_cmng_port)
2035 {
2036         u32 r_param = port_rate / 8;
2037         int port = BP_PORT(bp);
2038         int i;
2039
2040         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2041
2042         /* Enable minmax only if we are in e1hmf mode */
2043         if (IS_E1HMF(bp)) {
2044                 u32 fair_periodic_timeout_usec;
2045                 u32 t_fair;
2046
2047                 /* Enable rate shaping and fairness */
2048                 m_cmng_port->flags.cmng_vn_enable = 1;
2049                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2050                 m_cmng_port->flags.rate_shaping_enable = 1;
2051
2052                 if (!en_fness)
2053                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2054                            "  fairness will be disabled\n");
2055
2056                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2057                 m_cmng_port->rs_vars.rs_periodic_timeout =
2058                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2059
2060                 /* this is the threshold below which no timer arming will
2061                    occur; the 1.25 coefficient makes the threshold a little
2062                    bigger than the real time, to compensate for timer inaccuracy */
2063                 m_cmng_port->rs_vars.rs_threshold =
2064                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2065
2066                 /* resolution of fairness timer */
2067                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2068                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2069                 t_fair = T_FAIR_COEF / port_rate;
2070
2071                 /* this is the threshold below which we won't arm
2072                    the timer anymore */
2073                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2074
2075                 /* we multiply by 1e3/8 to get bytes/msec.
2076                    We don't want the credits to exceed
2077                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2078                 m_cmng_port->fair_vars.upper_bound =
2079                                                 r_param * t_fair * FAIR_MEM;
2080                 /* since each tick is 4 usec */
2081                 m_cmng_port->fair_vars.fairness_timeout =
2082                                                 fair_periodic_timeout_usec / 4;
2083
2084         } else {
2085                 /* Disable rate shaping and fairness */
2086                 m_cmng_port->flags.cmng_vn_enable = 0;
2087                 m_cmng_port->flags.fairness_enable = 0;
2088                 m_cmng_port->flags.rate_shaping_enable = 0;
2089
2090                 DP(NETIF_MSG_IFUP,
2091                    "Single function mode  minmax will be disabled\n");
2092         }
2093
2094         /* Store it to internal memory */
2095         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2096                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2097                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2098                        ((u32 *)(m_cmng_port))[i]);
2099 }
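
/* Worked example (illustrative, assuming port_rate is in Mbps): on a 10G
 * port, r_param = 10000 / 8 = 1250 and t_fair = T_FAIR_COEF / 10000 (the
 * "1000usec for 10G" case noted above).  The no-arming threshold becomes
 * rs_threshold = RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4, i.e. the bytes
 * expected in one timeout period scaled by the 1.25 slack coefficient.
 */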
2100
2101 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2102                                  u32 wsum, u16 port_rate,
2103                                  struct cmng_struct_per_port *m_cmng_port)
2104 {
2105         struct rate_shaping_vars_per_vn m_rs_vn;
2106         struct fairness_vars_per_vn m_fair_vn;
2107         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2108         u16 vn_min_rate, vn_max_rate;
2109         int i;
2110
2111         /* If function is hidden - set min and max to zeroes */
2112         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2113                 vn_min_rate = 0;
2114                 vn_max_rate = 0;
2115
2116         } else {
2117                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2118                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2119                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2120                    if current min rate is zero - set it to 1.
2121                    This is a requirement of the algorithm. */
2122                 if ((vn_min_rate == 0) && wsum)
2123                         vn_min_rate = DEF_MIN_RATE;
2124                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2125                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2126         }
2127
2128         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2129            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2130
2131         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2132         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2133
2134         /* global vn counter - maximal Mbps for this vn */
2135         m_rs_vn.vn_counter.rate = vn_max_rate;
2136
2137         /* quota - number of bytes transmitted in this period */
2138         m_rs_vn.vn_counter.quota =
2139                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2140
2141 #ifdef BNX2X_PER_PROT_QOS
2142         /* per protocol counter */
2143         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2144                 /* maximal Mbps for this protocol */
2145                 m_rs_vn.protocol_counters[protocol].rate =
2146                                                 protocol_max_rate[protocol];
2147                 /* the quota in each timer period -
2148                    number of bytes transmitted in this period */
2149                 m_rs_vn.protocol_counters[protocol].quota =
2150                         (u32)(rs_periodic_timeout_usec *
2151                           ((double)m_rs_vn.
2152                                    protocol_counters[protocol].rate/8));
2153         }
2154 #endif
2155
2156         if (wsum) {
2157                 /* credit for each period of the fairness algorithm:
2158                    number of bytes in T_FAIR (the VNs share the port rate).
2159                    wsum should not be larger than 10000, thus
2160                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2161                 m_fair_vn.vn_credit_delta =
2162                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2163                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2164                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2165                    m_fair_vn.vn_credit_delta);
2166         }
2167
2168 #ifdef BNX2X_PER_PROT_QOS
2169         do {
2170                 u32 protocolWeightSum = 0;
2171
2172                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2173                         protocolWeightSum +=
2174                                         drvInit.protocol_min_rate[protocol];
2175                 /* per protocol counter -
2176                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2177                 if (protocolWeightSum > 0) {
2178                         for (protocol = 0;
2179                              protocol < NUM_OF_PROTOCOLS; protocol++)
2180                                 /* credit for each period of the
2181                                    fairness algorithm - number of bytes in
2182                                    T_FAIR (the protocol share the vn rate) */
2183                                 m_fair_vn.protocol_credit_delta[protocol] =
2184                                         (u32)((vn_min_rate / 8) * t_fair *
2185                                         protocol_min_rate / protocolWeightSum);
2186                 }
2187         } while (0);
2188 #endif
2189
2190         /* Store it to internal memory */
2191         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2192                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2193                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2194                        ((u32 *)(&m_rs_vn))[i]);
2195
2196         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2197                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2198                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2199                        ((u32 *)(&m_fair_vn))[i]);
2200 }
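
/* Worked example (illustrative, assuming rates are in Mbps): a VN whose
 * MAX_BW field is 25 gets vn_max_rate = 2500 and a per-period quota of
 * 2500 * RS_PERIODIC_TIMEOUT_USEC / 8 bytes -- Mbps * usec / 8 comes out
 * directly in bytes, matching the "number of bytes transmitted in this
 * period" comment above.
 */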
2201
2202 /* This function is called upon link interrupt */
2203 static void bnx2x_link_attn(struct bnx2x *bp)
2204 {
2205         int vn;
2206
2207         /* Make sure that we are synced with the current statistics */
2208         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2209
2210         bnx2x_phy_hw_lock(bp);
2211         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2212         bnx2x_phy_hw_unlock(bp);
2213
2214         if (bp->link_vars.link_up) {
2215
2216                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2217                         struct host_port_stats *pstats;
2218
2219                         pstats = bnx2x_sp(bp, port_stats);
2220                         /* reset old bmac stats */
2221                         memset(&(pstats->mac_stx[0]), 0,
2222                                sizeof(struct mac_stx));
2223                 }
2224                 if ((bp->state == BNX2X_STATE_OPEN) ||
2225                     (bp->state == BNX2X_STATE_DISABLED))
2226                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2227         }
2228
2229         /* indicate link status */
2230         bnx2x_link_report(bp);
2231
2232         if (IS_E1HMF(bp)) {
2233                 int func;
2234
2235                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2236                         if (vn == BP_E1HVN(bp))
2237                                 continue;
2238
2239                         func = ((vn << 1) | BP_PORT(bp));
2240
2241                         /* Set the attention towards other drivers
2242                            on the same port */
2243                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2244                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2245                 }
2246         }
2247
2248         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2249                 struct cmng_struct_per_port m_cmng_port;
2250                 u32 wsum;
2251                 int port = BP_PORT(bp);
2252
2253                 /* Init RATE SHAPING and FAIRNESS contexts */
2254                 wsum = bnx2x_calc_vn_wsum(bp);
2255                 bnx2x_init_port_minmax(bp, (int)wsum,
2256                                         bp->link_vars.line_speed,
2257                                         &m_cmng_port);
2258                 if (IS_E1HMF(bp))
2259                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2260                                 bnx2x_init_vn_minmax(bp, 2*vn + port, wsum,
2261                                                      bp->link_vars.line_speed,
2262                                                      &m_cmng_port);
2263         }
2264 }
2265
2266 static void bnx2x__link_status_update(struct bnx2x *bp)
2267 {
2268         if (bp->state != BNX2X_STATE_OPEN)
2269                 return;
2270
2271         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2272
2273         if (bp->link_vars.link_up)
2274                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2275         else
2276                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2277
2278         /* indicate link status */
2279         bnx2x_link_report(bp);
2280 }
2281
2282 static void bnx2x_pmf_update(struct bnx2x *bp)
2283 {
2284         int port = BP_PORT(bp);
2285         u32 val;
2286
2287         bp->port.pmf = 1;
2288         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2289
2290         /* enable nig attention */
2291         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2292         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2293         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2294
2295         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2296 }
2297
2298 /* end of Link */
2299
2300 /* slow path */
2301
2302 /*
2303  * General service functions
2304  */
2305
2306 /* the slow path queue is odd since completions arrive on the fastpath ring */
2307 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2308                          u32 data_hi, u32 data_lo, int common)
2309 {
2310         int func = BP_FUNC(bp);
2311
2312         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2313            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2314            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2315            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2316            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2317
2318 #ifdef BNX2X_STOP_ON_ERROR
2319         if (unlikely(bp->panic))
2320                 return -EIO;
2321 #endif
2322
2323         spin_lock_bh(&bp->spq_lock);
2324
2325         if (!bp->spq_left) {
2326                 BNX2X_ERR("BUG! SPQ ring full!\n");
2327                 spin_unlock_bh(&bp->spq_lock);
2328                 bnx2x_panic();
2329                 return -EBUSY;
2330         }
2331
2332         /* CID needs the port number to be encoded in it */
2333         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2334                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2335                                      HW_CID(bp, cid)));
2336         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2337         if (common)
2338                 bp->spq_prod_bd->hdr.type |=
2339                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2340
2341         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2342         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2343
2344         bp->spq_left--;
2345
2346         if (bp->spq_prod_bd == bp->spq_last_bd) {
2347                 bp->spq_prod_bd = bp->spq;
2348                 bp->spq_prod_idx = 0;
2349                 DP(NETIF_MSG_TIMER, "end of spq\n");
2350
2351         } else {
2352                 bp->spq_prod_bd++;
2353                 bp->spq_prod_idx++;
2354         }
2355
2356         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2357                bp->spq_prod_idx);
2358
2359         spin_unlock_bh(&bp->spq_lock);
2360         return 0;
2361 }
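
/* Usage sketch (illustrative; the command shown is one example of the
 * ramrods posted through this helper): issue a port setup ramrod on CID 0
 * with no data; the completion later arrives on the fastpath ring and is
 * dispatched through bnx2x_sp_event():
 *
 *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
 */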
2362
2363 /* acquire split MCP access lock register */
2364 static int bnx2x_lock_alr(struct bnx2x *bp)
2365 {
2366         u32 i, j, val;
2367         int rc = 0;
2368
2369         might_sleep();
2370         i = 100;
2371         for (j = 0; j < i*10; j++) {
2372                 val = (1UL << 31);
2373                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2374                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2375                 if (val & (1L << 31))
2376                         break;
2377
2378                 msleep(5);
2379         }
2380         if (!(val & (1L << 31))) {
2381                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2382                 rc = -EBUSY;
2383         }
2384
2385         return rc;
2386 }
2387
2388 /* Release split MCP access lock register */
2389 static void bnx2x_unlock_alr(struct bnx2x *bp)
2390 {
2391         u32 val = 0;
2392
2393         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2394 }
2395
2396 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2397 {
2398         struct host_def_status_block *def_sb = bp->def_status_blk;
2399         u16 rc = 0;
2400
2401         barrier(); /* status block is written to by the chip */
2402
2403         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2404                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2405                 rc |= 1;
2406         }
2407         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2408                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2409                 rc |= 2;
2410         }
2411         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2412                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2413                 rc |= 4;
2414         }
2415         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2416                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2417                 rc |= 8;
2418         }
2419         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2420                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2421                 rc |= 16;
2422         }
2423         return rc;
2424 }
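
/* Editorial note: the return value is a bitmask of which indices moved --
 * bit 0 = attention bits, bit 1 = CSTORM, bit 2 = USTORM, bit 3 = XSTORM,
 * bit 4 = TSTORM.  bnx2x_sp_task() below keys off bits 0 and 1.
 */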
2425
2426 /*
2427  * slow path service functions
2428  */
2429
2430 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2431 {
2432         int port = BP_PORT(bp);
2433         int func = BP_FUNC(bp);
2434         u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2435         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2436                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2437         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2438                                        NIG_REG_MASK_INTERRUPT_PORT0;
2439
2440         if (~bp->aeu_mask & (asserted & 0xff))
2441                 BNX2X_ERR("IGU ERROR\n");
2442         if (bp->attn_state & asserted)
2443                 BNX2X_ERR("IGU ERROR\n");
2444
2445         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2446            bp->aeu_mask, asserted);
2447         bp->aeu_mask &= ~(asserted & 0xff);
2448         DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2449
2450         REG_WR(bp, aeu_addr, bp->aeu_mask);
2451
2452         bp->attn_state |= asserted;
2453
2454         if (asserted & ATTN_HARD_WIRED_MASK) {
2455                 if (asserted & ATTN_NIG_FOR_FUNC) {
2456
2457                         /* save nig interrupt mask */
2458                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2459                         REG_WR(bp, nig_int_mask_addr, 0);
2460
2461                         bnx2x_link_attn(bp);
2462
2463                         /* handle unicore attn? */
2464                 }
2465                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2466                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2467
2468                 if (asserted & GPIO_2_FUNC)
2469                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2470
2471                 if (asserted & GPIO_3_FUNC)
2472                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2473
2474                 if (asserted & GPIO_4_FUNC)
2475                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2476
2477                 if (port == 0) {
2478                         if (asserted & ATTN_GENERAL_ATTN_1) {
2479                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2480                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2481                         }
2482                         if (asserted & ATTN_GENERAL_ATTN_2) {
2483                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2484                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2485                         }
2486                         if (asserted & ATTN_GENERAL_ATTN_3) {
2487                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2488                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2489                         }
2490                 } else {
2491                         if (asserted & ATTN_GENERAL_ATTN_4) {
2492                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2493                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2494                         }
2495                         if (asserted & ATTN_GENERAL_ATTN_5) {
2496                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2497                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2498                         }
2499                         if (asserted & ATTN_GENERAL_ATTN_6) {
2500                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2501                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2502                         }
2503                 }
2504
2505         } /* if hardwired */
2506
2507         DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2508            asserted, BAR_IGU_INTMEM + igu_addr);
2509         REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2510
2511         /* now set back the mask */
2512         if (asserted & ATTN_NIG_FOR_FUNC)
2513                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2514 }
2515
2516 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2517 {
2518         int port = BP_PORT(bp);
2519         int reg_offset;
2520         u32 val;
2521
2522         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2523                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2524
2525         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2526
2527                 val = REG_RD(bp, reg_offset);
2528                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2529                 REG_WR(bp, reg_offset, val);
2530
2531                 BNX2X_ERR("SPIO5 hw attention\n");
2532
2533                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2534                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2535                         /* Fan failure attention */
2536
2537                         /* The PHY reset is controlled by GPIO 1 */
2538                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2539                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2540                         /* Low power mode is controlled by GPIO 2 */
2541                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2542                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2543                         /* mark the failure */
2544                         bp->link_params.ext_phy_config &=
2545                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2546                         bp->link_params.ext_phy_config |=
2547                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2548                         SHMEM_WR(bp,
2549                                  dev_info.port_hw_config[port].
2550                                                         external_phy_config,
2551                                  bp->link_params.ext_phy_config);
2552                         /* log the failure */
2553                         printk(KERN_ERR PFX "Fan Failure on Network"
2554                                " Controller %s has caused the driver to"
2555                                " shut down the card to prevent permanent"
2556                                " damage.  Please contact Dell Support for"
2557                                " assistance\n", bp->dev->name);
2558                         break;
2559
2560                 default:
2561                         break;
2562                 }
2563         }
2564
2565         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2566
2567                 val = REG_RD(bp, reg_offset);
2568                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2569                 REG_WR(bp, reg_offset, val);
2570
2571                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2572                           (attn & HW_INTERRUT_ASSERT_SET_0));
2573                 bnx2x_panic();
2574         }
2575 }
2576
2577 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2578 {
2579         u32 val;
2580
2581         if (attn & BNX2X_DOORQ_ASSERT) {
2582
2583                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2584                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2585                 /* DORQ discard attention */
2586                 if (val & 0x2)
2587                         BNX2X_ERR("FATAL error from DORQ\n");
2588         }
2589
2590         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2591
2592                 int port = BP_PORT(bp);
2593                 int reg_offset;
2594
2595                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2596                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2597
2598                 val = REG_RD(bp, reg_offset);
2599                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2600                 REG_WR(bp, reg_offset, val);
2601
2602                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2603                           (attn & HW_INTERRUT_ASSERT_SET_1));
2604                 bnx2x_panic();
2605         }
2606 }
2607
2608 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2609 {
2610         u32 val;
2611
2612         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2613
2614                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2615                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2616                 /* CFC error attention */
2617                 if (val & 0x2)
2618                         BNX2X_ERR("FATAL error from CFC\n");
2619         }
2620
2621         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2622
2623                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2624                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2625                 /* RQ_USDMDP_FIFO_OVERFLOW */
2626                 if (val & 0x18000)
2627                         BNX2X_ERR("FATAL error from PXP\n");
2628         }
2629
2630         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2631
2632                 int port = BP_PORT(bp);
2633                 int reg_offset;
2634
2635                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2636                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2637
2638                 val = REG_RD(bp, reg_offset);
2639                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2640                 REG_WR(bp, reg_offset, val);
2641
2642                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2643                           (attn & HW_INTERRUT_ASSERT_SET_2));
2644                 bnx2x_panic();
2645         }
2646 }
2647
2648 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2649 {
2650         u32 val;
2651
2652         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2653
2654                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2655                         int func = BP_FUNC(bp);
2656
2657                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2658                         bnx2x__link_status_update(bp);
2659                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2660                                                         DRV_STATUS_PMF)
2661                                 bnx2x_pmf_update(bp);
2662
2663                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2664
2665                         BNX2X_ERR("MC assert!\n");
2666                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2667                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2668                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2670                         bnx2x_panic();
2671
2672                 } else if (attn & BNX2X_MCP_ASSERT) {
2673
2674                         BNX2X_ERR("MCP assert!\n");
2675                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2676                         bnx2x_fw_dump(bp);
2677
2678                 } else
2679                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2680         }
2681
2682         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2683                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2684                 if (attn & BNX2X_GRC_TIMEOUT) {
2685                         val = CHIP_IS_E1H(bp) ?
2686                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2687                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2688                 }
2689                 if (attn & BNX2X_GRC_RSV) {
2690                         val = CHIP_IS_E1H(bp) ?
2691                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2692                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2693                 }
2694                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2695         }
2696 }
2697
2698 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2699 {
2700         struct attn_route attn;
2701         struct attn_route group_mask;
2702         int port = BP_PORT(bp);
2703         int index;
2704         u32 reg_addr;
2705         u32 val;
2706
2707         /* need to take HW lock because MCP or other port might also
2708            try to handle this event */
2709         bnx2x_lock_alr(bp);
2710
2711         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2712         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2713         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2714         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2715         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2716            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2717
2718         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2719                 if (deasserted & (1 << index)) {
2720                         group_mask = bp->attn_group[index];
2721
2722                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2723                            index, group_mask.sig[0], group_mask.sig[1],
2724                            group_mask.sig[2], group_mask.sig[3]);
2725
2726                         bnx2x_attn_int_deasserted3(bp,
2727                                         attn.sig[3] & group_mask.sig[3]);
2728                         bnx2x_attn_int_deasserted1(bp,
2729                                         attn.sig[1] & group_mask.sig[1]);
2730                         bnx2x_attn_int_deasserted2(bp,
2731                                         attn.sig[2] & group_mask.sig[2]);
2732                         bnx2x_attn_int_deasserted0(bp,
2733                                         attn.sig[0] & group_mask.sig[0]);
2734
2735                         if ((attn.sig[0] & group_mask.sig[0] &
2736                                                 HW_PRTY_ASSERT_SET_0) ||
2737                             (attn.sig[1] & group_mask.sig[1] &
2738                                                 HW_PRTY_ASSERT_SET_1) ||
2739                             (attn.sig[2] & group_mask.sig[2] &
2740                                                 HW_PRTY_ASSERT_SET_2))
2741                                BNX2X_ERR("FATAL HW block parity attention\n");
2742                 }
2743         }
2744
2745         bnx2x_unlock_alr(bp);
2746
2747         reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2748
2749         val = ~deasserted;
2750 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2751            val, BAR_IGU_INTMEM + reg_addr); */
2752         REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2753
2754         if (bp->aeu_mask & (deasserted & 0xff))
2755                 BNX2X_ERR("IGU BUG!\n");
2756         if (~bp->attn_state & deasserted)
2757                 BNX2X_ERR("IGU BUG!\n");
2758
2759         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2760                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2761
2762         DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2763         bp->aeu_mask |= (deasserted & 0xff);
2764
2765         DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2766         REG_WR(bp, reg_addr, bp->aeu_mask);
2767
2768         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2769         bp->attn_state &= ~deasserted;
2770         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2771 }
2772
2773 static void bnx2x_attn_int(struct bnx2x *bp)
2774 {
2775         /* read local copy of bits */
2776         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2777         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2778         u32 attn_state = bp->attn_state;
2779
2780         /* look for changed bits */
2781         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2782         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
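        /* example: attn_bits=1, attn_ack=0, attn_state=0 gives asserted=1
           (newly raised); attn_bits=0, attn_ack=1, attn_state=1 gives
           deasserted=1 (previously handled, now dropped) */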
2783
2784         DP(NETIF_MSG_HW,
2785            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2786            attn_bits, attn_ack, asserted, deasserted);
2787
2788         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2789                 BNX2X_ERR("BAD attention state\n");
2790
2791         /* handle bits that were raised */
2792         if (asserted)
2793                 bnx2x_attn_int_asserted(bp, asserted);
2794
2795         if (deasserted)
2796                 bnx2x_attn_int_deasserted(bp, deasserted);
2797 }
2798
2799 static void bnx2x_sp_task(struct work_struct *work)
2800 {
2801         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2802         u16 status;
2803
2804
2805         /* Return here if interrupt is disabled */
2806         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2807                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2808                 return;
2809         }
2810
2811         status = bnx2x_update_dsb_idx(bp);
2812 /*      if (status == 0)                                     */
2813 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2814
2815         DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2816
2817         /* HW attentions */
2818         if (status & 0x1)
2819                 bnx2x_attn_int(bp);
2820
2821         /* CStorm events: query_stats, port delete ramrod */
2822         if (status & 0x2)
2823                 bp->stats_pending = 0;
2824
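        /* ack all def-sb storm indices; only the final (TSTORM) ack uses
           IGU_INT_ENABLE and re-enables the interrupt */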
2825         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2826                      IGU_INT_NOP, 1);
2827         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2828                      IGU_INT_NOP, 1);
2829         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2830                      IGU_INT_NOP, 1);
2831         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2832                      IGU_INT_NOP, 1);
2833         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2834                      IGU_INT_ENABLE, 1);
2835
2836 }
2837
2838 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2839 {
2840         struct net_device *dev = dev_instance;
2841         struct bnx2x *bp = netdev_priv(dev);
2842
2843         /* Return here if interrupt is disabled */
2844         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2845                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2846                 return IRQ_HANDLED;
2847         }
2848
2849         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2850
2851 #ifdef BNX2X_STOP_ON_ERROR
2852         if (unlikely(bp->panic))
2853                 return IRQ_HANDLED;
2854 #endif
2855
2856         schedule_work(&bp->sp_task);
2857
2858         return IRQ_HANDLED;
2859 }
2860
2861 /* end of slow path */
2862
2863 /* Statistics */
2864
2865 /****************************************************************************
2866 * Macros
2867 ****************************************************************************/
2868
2869 /* sum[hi:lo] += add[hi:lo] */
2870 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2871         do { \
2872                 s_lo += a_lo; \
2873                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2874         } while (0)
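/* note the parenthesised carry: '+' binds tighter than '?:', so without the
   parentheses the whole sum would be taken as the ternary condition.
   Example: s = 0x00000000ffffffff plus a = 1 wraps s_lo to 0, the
   (s_lo < a_lo) test is true and the carry gives s = 0x0000000100000000 */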
2875
2876 /* difference = minuend - subtrahend */
2877 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2878         do { \
2879                 if (m_lo < s_lo) { \
2880                         /* underflow */ \
2881                         d_hi = m_hi - s_hi; \
2882                         if (d_hi > 0) { \
2883                         /* we can 'loan' 1 */ \
2884                                 d_hi--; \
2885                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2886                         } else { \
2887                         /* m_hi <= s_hi */ \
2888                                 d_hi = 0; \
2889                                 d_lo = 0; \
2890                         } \
2891                 } else { \
2892                         /* m_lo >= s_lo */ \
2893                         if (m_hi < s_hi) { \
2894                                 d_hi = 0; \
2895                                 d_lo = 0; \
2896                         } else { \
2897                         /* m_hi >= s_hi */ \
2898                                 d_hi = m_hi - s_hi; \
2899                                 d_lo = m_lo - s_lo; \
2900                         } \
2901                 } \
2902         } while (0)
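/* example: m = 0x0000000100000000 minus s = 1 takes the m_lo < s_lo path,
   loans 1 from d_hi and sets d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
   i.e. d = 0x00000000ffffffff; a negative difference is clamped to 0
   instead of being allowed to wrap */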
2903
2904 #define UPDATE_STAT64(s, t) \
2905         do { \
2906                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2907                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2908                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2909                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2910                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2911                        pstats->mac_stx[1].t##_lo, diff.lo); \
2912         } while (0)
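/* mac_stx[0] keeps the last raw HW snapshot (so the next delta can be
   computed), mac_stx[1] accumulates the deltas */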
2913
2914 #define UPDATE_STAT64_NIG(s, t) \
2915         do { \
2916                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2917                         diff.lo, new->s##_lo, old->s##_lo); \
2918                 ADD_64(estats->t##_hi, diff.hi, \
2919                        estats->t##_lo, diff.lo); \
2920         } while (0)
2921
2922 /* sum[hi:lo] += add */
2923 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2924         do { \
2925                 s_lo += a; \
2926                 s_hi += (s_lo < a) ? 1 : 0; \
2927         } while (0)
2928
2929 #define UPDATE_EXTEND_STAT(s) \
2930         do { \
2931                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2932                               pstats->mac_stx[1].s##_lo, \
2933                               new->s); \
2934         } while (0)
2935
2936 #define UPDATE_EXTEND_TSTAT(s, t) \
2937         do { \
2938                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2939                 old_tclient->s = le32_to_cpu(tclient->s); \
2940                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2941         } while (0)
2942
2943 #define UPDATE_EXTEND_XSTAT(s, t) \
2944         do { \
2945                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2946                 old_xclient->s = le32_to_cpu(xclient->s); \
2947                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2948         } while (0)
2949
2950 /*
2951  * General service functions
2952  */
2953
2954 static inline long bnx2x_hilo(u32 *hiref)
2955 {
2956         u32 lo = *(hiref + 1);
2957 #if (BITS_PER_LONG == 64)
2958         u32 hi = *hiref;
2959
2960         return HILO_U64(hi, lo);
2961 #else
2962         return lo;
2963 #endif
2964 }
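/* on 32-bit platforms only the low dword is returned, presumably because
   the unsigned long net_device_stats counters this feeds are 32 bits there */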
2965
2966 /*
2967  * Init service functions
2968  */
2969
2970 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2971 {
2972         if (!bp->stats_pending) {
2973                 struct eth_query_ramrod_data ramrod_data = {0};
2974                 int rc;
2975
2976                 ramrod_data.drv_counter = bp->stats_counter++;
2977                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2978                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2979
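                /* the 64-bit ramrod data is handed to the FW as two
                   32-bit words */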
2980                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2981                                    ((u32 *)&ramrod_data)[1],
2982                                    ((u32 *)&ramrod_data)[0], 0);
2983                 if (rc == 0) {
2984                         /* stats ramrod has its own slot on the spq */
2985                         bp->spq_left++;
2986                         bp->stats_pending = 1;
2987                 }
2988         }
2989 }
2990
2991 static void bnx2x_stats_init(struct bnx2x *bp)
2992 {
2993         int port = BP_PORT(bp);
2994
2995         bp->executer_idx = 0;
2996         bp->stats_counter = 0;
2997
2998         /* port stats */
2999         if (!BP_NOMCP(bp))
3000                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3001         else
3002                 bp->port.port_stx = 0;
3003         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3004
3005         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3006         bp->port.old_nig_stats.brb_discard =
3007                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3008         bp->port.old_nig_stats.brb_truncate =
3009                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3010         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3011                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3012         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3013                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3014
3015         /* function stats */
3016         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3017         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3018         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3019         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3020
3021         bp->stats_state = STATS_STATE_DISABLED;
3022         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3023                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3024 }
3025
3026 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3027 {
3028         struct dmae_command *dmae = &bp->stats_dmae;
3029         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3030
3031         *stats_comp = DMAE_COMP_VAL;
3032
3033         /* loader */
3034         if (bp->executer_idx) {
3035                 int loader_idx = PMF_DMAE_C(bp);
3036
3037                 memset(dmae, 0, sizeof(struct dmae_command));
3038
3039                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3040                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3041                                 DMAE_CMD_DST_RESET |
3042 #ifdef __BIG_ENDIAN
3043                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3044 #else
3045                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3046 #endif
3047                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3048                                                DMAE_CMD_PORT_0) |
3049                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3050                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3051                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3052                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3053                                      sizeof(struct dmae_command) *
3054                                      (loader_idx + 1)) >> 2;
3055                 dmae->dst_addr_hi = 0;
3056                 dmae->len = sizeof(struct dmae_command) >> 2;
3057                 if (CHIP_IS_E1(bp))
3058                         dmae->len--;
3059                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3060                 dmae->comp_addr_hi = 0;
3061                 dmae->comp_val = 1;
3062
3063                 *stats_comp = 0;
3064                 bnx2x_post_dmae(bp, dmae, loader_idx);
3065
3066         } else if (bp->func_stx) {
3067                 *stats_comp = 0;
3068                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3069         }
3070 }
3071
3072 static int bnx2x_stats_comp(struct bnx2x *bp)
3073 {
3074         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3075         int cnt = 10;
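        /* poll the completion value for up to ~10ms (10 x msleep(1)) */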
3076
3077         might_sleep();
3078         while (*stats_comp != DMAE_COMP_VAL) {
3079                 msleep(1);
3080                 if (!cnt) {
3081                         BNX2X_ERR("timed out waiting for stats to finish\n");
3082                         break;
3083                 }
3084                 cnt--;
3085         }
3086         return 1;
3087 }
3088
3089 /*
3090  * Statistics service functions
3091  */
3092
3093 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3094 {
3095         struct dmae_command *dmae;
3096         u32 opcode;
3097         int loader_idx = PMF_DMAE_C(bp);
3098         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099
3100         /* sanity */
3101         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3102                 BNX2X_ERR("BUG!\n");
3103                 return;
3104         }
3105
3106         bp->executer_idx = 0;
3107
3108         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3109                   DMAE_CMD_C_ENABLE |
3110                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3111 #ifdef __BIG_ENDIAN
3112                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3113 #else
3114                   DMAE_CMD_ENDIANITY_DW_SWAP |
3115 #endif
3116                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3117                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3118
3119         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3120         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3121         dmae->src_addr_lo = bp->port.port_stx >> 2;
3122         dmae->src_addr_hi = 0;
3123         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3124         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3125         dmae->len = DMAE_LEN32_RD_MAX;
3126         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3127         dmae->comp_addr_hi = 0;
3128         dmae->comp_val = 1;
3129
3130         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3131         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3132         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3133         dmae->src_addr_hi = 0;
3134         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3135                                    DMAE_LEN32_RD_MAX * 4);
3136         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3137                                    DMAE_LEN32_RD_MAX * 4);
3138         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3139         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3140         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3141         dmae->comp_val = DMAE_COMP_VAL;
3142
3143         *stats_comp = 0;
3144         bnx2x_hw_stats_post(bp);
3145         bnx2x_stats_comp(bp);
3146 }
3147
3148 static void bnx2x_port_stats_init(struct bnx2x *bp)
3149 {
3150         struct dmae_command *dmae;
3151         int port = BP_PORT(bp);
3152         int vn = BP_E1HVN(bp);
3153         u32 opcode;
3154         int loader_idx = PMF_DMAE_C(bp);
3155         u32 mac_addr;
3156         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3157
3158         /* sanity */
3159         if (!bp->link_vars.link_up || !bp->port.pmf) {
3160                 BNX2X_ERR("BUG!\n");
3161                 return;
3162         }
3163
3164         bp->executer_idx = 0;
3165
3166         /* MCP */
3167         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3168                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3169                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3170 #ifdef __BIG_ENDIAN
3171                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3172 #else
3173                   DMAE_CMD_ENDIANITY_DW_SWAP |
3174 #endif
3175                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3176                   (vn << DMAE_CMD_E1HVN_SHIFT));
3177
3178         if (bp->port.port_stx) {
3179
3180                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181                 dmae->opcode = opcode;
3182                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3183                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3184                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3185                 dmae->dst_addr_hi = 0;
3186                 dmae->len = sizeof(struct host_port_stats) >> 2;
3187                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188                 dmae->comp_addr_hi = 0;
3189                 dmae->comp_val = 1;
3190         }
3191
3192         if (bp->func_stx) {
3193
3194                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3195                 dmae->opcode = opcode;
3196                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3197                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3198                 dmae->dst_addr_lo = bp->func_stx >> 2;
3199                 dmae->dst_addr_hi = 0;
3200                 dmae->len = sizeof(struct host_func_stats) >> 2;
3201                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3202                 dmae->comp_addr_hi = 0;
3203                 dmae->comp_val = 1;
3204         }
3205
3206         /* MAC */
3207         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3208                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3209                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3210 #ifdef __BIG_ENDIAN
3211                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3212 #else
3213                   DMAE_CMD_ENDIANITY_DW_SWAP |
3214 #endif
3215                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3216                   (vn << DMAE_CMD_E1HVN_SHIFT));
3217
3218         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3219
3220                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3221                                    NIG_REG_INGRESS_BMAC0_MEM);
3222
3223                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3224                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3225                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3226                 dmae->opcode = opcode;
3227                 dmae->src_addr_lo = (mac_addr +
3228                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3229                 dmae->src_addr_hi = 0;
3230                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3231                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3232                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3233                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3234                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3235                 dmae->comp_addr_hi = 0;
3236                 dmae->comp_val = 1;
3237
3238                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3239                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3240                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3241                 dmae->opcode = opcode;
3242                 dmae->src_addr_lo = (mac_addr +
3243                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3244                 dmae->src_addr_hi = 0;
3245                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3246                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3247                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3248                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3249                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3250                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3251                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3252                 dmae->comp_addr_hi = 0;
3253                 dmae->comp_val = 1;
3254
3255         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3256
3257                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3258
3259                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3260                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3261                 dmae->opcode = opcode;
3262                 dmae->src_addr_lo = (mac_addr +
3263                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3264                 dmae->src_addr_hi = 0;
3265                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3266                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3267                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3268                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3269                 dmae->comp_addr_hi = 0;
3270                 dmae->comp_val = 1;
3271
3272                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3273                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274                 dmae->opcode = opcode;
3275                 dmae->src_addr_lo = (mac_addr +
3276                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3277                 dmae->src_addr_hi = 0;
3278                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3279                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3280                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3281                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3282                 dmae->len = 1;
3283                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3284                 dmae->comp_addr_hi = 0;
3285                 dmae->comp_val = 1;
3286
3287                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3288                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3289                 dmae->opcode = opcode;
3290                 dmae->src_addr_lo = (mac_addr +
3291                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3292                 dmae->src_addr_hi = 0;
3293                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3294                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3295                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3296                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3297                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3298                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3299                 dmae->comp_addr_hi = 0;
3300                 dmae->comp_val = 1;
3301         }
3302
3303         /* NIG */
3304         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305         dmae->opcode = opcode;
3306         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3307                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3308         dmae->src_addr_hi = 0;
3309         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3310         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3311         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3312         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313         dmae->comp_addr_hi = 0;
3314         dmae->comp_val = 1;
3315
3316         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3317         dmae->opcode = opcode;
3318         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3319                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3320         dmae->src_addr_hi = 0;
3321         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3322                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3323         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3324                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3325         dmae->len = (2*sizeof(u32)) >> 2;
3326         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3327         dmae->comp_addr_hi = 0;
3328         dmae->comp_val = 1;
3329
3330         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3331         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3332                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3333                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3334 #ifdef __BIG_ENDIAN
3335                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3336 #else
3337                         DMAE_CMD_ENDIANITY_DW_SWAP |
3338 #endif
3339                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3340                         (vn << DMAE_CMD_E1HVN_SHIFT));
3341         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3342                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3343         dmae->src_addr_hi = 0;
3344         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3345                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3346         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3347                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3348         dmae->len = (2*sizeof(u32)) >> 2;
3349         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3350         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3351         dmae->comp_val = DMAE_COMP_VAL;
3352
3353         *stats_comp = 0;
3354 }
3355
3356 static void bnx2x_func_stats_init(struct bnx2x *bp)
3357 {
3358         struct dmae_command *dmae = &bp->stats_dmae;
3359         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3360
3361         /* sanity */
3362         if (!bp->func_stx) {
3363                 BNX2X_ERR("BUG!\n");
3364                 return;
3365         }
3366
3367         bp->executer_idx = 0;
3368         memset(dmae, 0, sizeof(struct dmae_command));
3369
3370         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3371                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3372                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3373 #ifdef __BIG_ENDIAN
3374                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3375 #else
3376                         DMAE_CMD_ENDIANITY_DW_SWAP |
3377 #endif
3378                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3379                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3380         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3381         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3382         dmae->dst_addr_lo = bp->func_stx >> 2;
3383         dmae->dst_addr_hi = 0;
3384         dmae->len = sizeof(struct host_func_stats) >> 2;
3385         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3386         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3387         dmae->comp_val = DMAE_COMP_VAL;
3388
3389         *stats_comp = 0;
3390 }
3391
3392 static void bnx2x_stats_start(struct bnx2x *bp)
3393 {
3394         if (bp->port.pmf)
3395                 bnx2x_port_stats_init(bp);
3396
3397         else if (bp->func_stx)
3398                 bnx2x_func_stats_init(bp);
3399
3400         bnx2x_hw_stats_post(bp);
3401         bnx2x_storm_stats_post(bp);
3402 }
3403
3404 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3405 {
3406         bnx2x_stats_comp(bp);
3407         bnx2x_stats_pmf_update(bp);
3408         bnx2x_stats_start(bp);
3409 }
3410
3411 static void bnx2x_stats_restart(struct bnx2x *bp)
3412 {
3413         bnx2x_stats_comp(bp);
3414         bnx2x_stats_start(bp);
3415 }
3416
3417 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3418 {
3419         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3420         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3421         struct regpair diff;
3422
3423         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3424         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3425         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3426         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3427         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3428         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3429         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3430         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3431         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3432         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3433         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3434         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3435         UPDATE_STAT64(tx_stat_gt127,
3436                                 tx_stat_etherstatspkts65octetsto127octets);
3437         UPDATE_STAT64(tx_stat_gt255,
3438                                 tx_stat_etherstatspkts128octetsto255octets);
3439         UPDATE_STAT64(tx_stat_gt511,
3440                                 tx_stat_etherstatspkts256octetsto511octets);
3441         UPDATE_STAT64(tx_stat_gt1023,
3442                                 tx_stat_etherstatspkts512octetsto1023octets);
3443         UPDATE_STAT64(tx_stat_gt1518,
3444                                 tx_stat_etherstatspkts1024octetsto1522octets);
3445         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3446         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3447         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3448         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3449         UPDATE_STAT64(tx_stat_gterr,
3450                                 tx_stat_dot3statsinternalmactransmiterrors);
3451         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3452 }
3453
3454 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3455 {
3456         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3457         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3458
3459         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3460         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3461         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3462         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3463         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3464         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3465         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3466         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3467         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3468         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3469         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3470         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3471         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3472         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3473         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3474         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3475         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3476         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3477         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3478         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3479         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3480         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3481         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3482         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3483         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3484         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3485         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3486         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3487         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3488         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3489         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3490 }
3491
3492 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3493 {
3494         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3495         struct nig_stats *old = &(bp->port.old_nig_stats);
3496         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3497         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3498         struct regpair diff;
3499
3500         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3501                 bnx2x_bmac_stats_update(bp);
3502
3503         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3504                 bnx2x_emac_stats_update(bp);
3505
3506         else { /* unreached */
3507                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3508                 return -1;
3509         }
3510
3511         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3512                       new->brb_discard - old->brb_discard);
3513         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3514                       new->brb_truncate - old->brb_truncate);
3515
3516         UPDATE_STAT64_NIG(egress_mac_pkt0,
3517                                         etherstatspkts1024octetsto1522octets);
3518         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3519
3520         memcpy(old, new, sizeof(struct nig_stats));
3521
3522         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3523                sizeof(struct mac_stx));
3524         estats->brb_drop_hi = pstats->brb_drop_hi;
3525         estats->brb_drop_lo = pstats->brb_drop_lo;
3526
3527         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3528
3529         return 0;
3530 }
3531
3532 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3533 {
3534         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3535         int cl_id = BP_CL_ID(bp);
3536         struct tstorm_per_port_stats *tport =
3537                                 &stats->tstorm_common.port_statistics;
3538         struct tstorm_per_client_stats *tclient =
3539                         &stats->tstorm_common.client_statistics[cl_id];
3540         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3541         struct xstorm_per_client_stats *xclient =
3542                         &stats->xstorm_common.client_statistics[cl_id];
3543         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3544         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3545         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3546         u32 diff;
3547
3548         /* are storm stats valid? */
3549         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3550                                                         bp->stats_counter) {
3551                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3552                    "  tstorm counter (%d) != stats_counter (%d)\n",
3553                    tclient->stats_counter, bp->stats_counter);
3554                 return -1;
3555         }
3556         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3557                                                         bp->stats_counter) {
3558                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3559                    "  xstorm counter (%d) != stats_counter (%d)\n",
3560                    xclient->stats_counter, bp->stats_counter);
3561                 return -2;
3562         }
3563
3564         fstats->total_bytes_received_hi =
3565         fstats->valid_bytes_received_hi =
3566                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3567         fstats->total_bytes_received_lo =
3568         fstats->valid_bytes_received_lo =
3569                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3570
3571         estats->error_bytes_received_hi =
3572                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3573         estats->error_bytes_received_lo =
3574                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3575         ADD_64(estats->error_bytes_received_hi,
3576                estats->rx_stat_ifhcinbadoctets_hi,
3577                estats->error_bytes_received_lo,
3578                estats->rx_stat_ifhcinbadoctets_lo);
3579
3580         ADD_64(fstats->total_bytes_received_hi,
3581                estats->error_bytes_received_hi,
3582                fstats->total_bytes_received_lo,
3583                estats->error_bytes_received_lo);
3584
3585         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3586         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3587                                 total_multicast_packets_received);
3588         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3589                                 total_broadcast_packets_received);
3590
3591         fstats->total_bytes_transmitted_hi =
3592                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3593         fstats->total_bytes_transmitted_lo =
3594                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3595
3596         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3597                                 total_unicast_packets_transmitted);
3598         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3599                                 total_multicast_packets_transmitted);
3600         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3601                                 total_broadcast_packets_transmitted);
3602
3603         memcpy(estats, &(fstats->total_bytes_received_hi),
3604                sizeof(struct host_func_stats) - 2*sizeof(u32));
3605
3606         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3607         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3608         estats->brb_truncate_discard =
3609                                 le32_to_cpu(tport->brb_truncate_discard);
3610         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3611
3612         old_tclient->rcv_unicast_bytes.hi =
3613                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3614         old_tclient->rcv_unicast_bytes.lo =
3615                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3616         old_tclient->rcv_broadcast_bytes.hi =
3617                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3618         old_tclient->rcv_broadcast_bytes.lo =
3619                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3620         old_tclient->rcv_multicast_bytes.hi =
3621                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3622         old_tclient->rcv_multicast_bytes.lo =
3623                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3624         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3625
3626         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3627         old_tclient->packets_too_big_discard =
3628                                 le32_to_cpu(tclient->packets_too_big_discard);
3629         estats->no_buff_discard =
3630         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3631         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3632
3633         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3634         old_xclient->unicast_bytes_sent.hi =
3635                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3636         old_xclient->unicast_bytes_sent.lo =
3637                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3638         old_xclient->multicast_bytes_sent.hi =
3639                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3640         old_xclient->multicast_bytes_sent.lo =
3641                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3642         old_xclient->broadcast_bytes_sent.hi =
3643                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3644         old_xclient->broadcast_bytes_sent.lo =
3645                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3646
3647         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3648
3649         return 0;
3650 }
3651
3652 static void bnx2x_net_stats_update(struct bnx2x *bp)
3653 {
3654         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3655         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3656         struct net_device_stats *nstats = &bp->dev->stats;
3657
3658         nstats->rx_packets =
3659                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3660                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3661                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3662
3663         nstats->tx_packets =
3664                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3665                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3666                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3667
3668         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3669
3670         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3671
3672         nstats->rx_dropped = old_tclient->checksum_discard +
3673                              estats->mac_discard;
3674         nstats->tx_dropped = 0;
3675
3676         nstats->multicast =
3677                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3678
3679         nstats->collisions =
3680                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3681                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3682                         estats->tx_stat_dot3statslatecollisions_lo +
3683                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3684
3685         estats->jabber_packets_received =
3686                                 old_tclient->packets_too_big_discard +
3687                                 estats->rx_stat_dot3statsframestoolong_lo;
3688
3689         nstats->rx_length_errors =
3690                                 estats->rx_stat_etherstatsundersizepkts_lo +
3691                                 estats->jabber_packets_received;
3692         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3693         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3694         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3695         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3696         nstats->rx_missed_errors = estats->xxoverflow_discard;
3697
3698         nstats->rx_errors = nstats->rx_length_errors +
3699                             nstats->rx_over_errors +
3700                             nstats->rx_crc_errors +
3701                             nstats->rx_frame_errors +
3702                             nstats->rx_fifo_errors +
3703                             nstats->rx_missed_errors;
3704
3705         nstats->tx_aborted_errors =
3706                         estats->tx_stat_dot3statslatecollisions_lo +
3707                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3708         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3709         nstats->tx_fifo_errors = 0;
3710         nstats->tx_heartbeat_errors = 0;
3711         nstats->tx_window_errors = 0;
3712
3713         nstats->tx_errors = nstats->tx_aborted_errors +
3714                             nstats->tx_carrier_errors;
3715 }
3716
3717 static void bnx2x_stats_update(struct bnx2x *bp)
3718 {
3719         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3720         int update = 0;
3721
3722         if (*stats_comp != DMAE_COMP_VAL)
3723                 return;
3724
3725         if (bp->port.pmf)
3726                 update = (bnx2x_hw_stats_update(bp) == 0);
3727
3728         update |= (bnx2x_storm_stats_update(bp) == 0);
3729
3730         if (update)
3731                 bnx2x_net_stats_update(bp);
3732
3733         else {
3734                 if (bp->stats_pending) {
3735                         bp->stats_pending++;
3736                         if (bp->stats_pending == 3) {
3737                                 BNX2X_ERR("stats not updated in 3 consecutive polls\n");
3738                                 bnx2x_panic();
3739                                 return;
3740                         }
3741                 }
3742         }
3743
3744         if (bp->msglevel & NETIF_MSG_TIMER) {
3745                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3746                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3747                 struct net_device_stats *nstats = &bp->dev->stats;
3748                 int i;
3749
3750                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3751                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3752                                   "  tx pkt (%lx)\n",
3753                        bnx2x_tx_avail(bp->fp),
3754                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3755                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3756                                   "  rx pkt (%lx)\n",
3757                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3758                              bp->fp->rx_comp_cons),
3759                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3760                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3761                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3762                        estats->driver_xoff, estats->brb_drop_lo);
3763                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3764                         "packets_too_big_discard %u  no_buff_discard %u  "
3765                         "mac_discard %u  mac_filter_discard %u  "
3766                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3767                         "ttl0_discard %u\n",
3768                        old_tclient->checksum_discard,
3769                        old_tclient->packets_too_big_discard,
3770                        old_tclient->no_buff_discard, estats->mac_discard,
3771                        estats->mac_filter_discard, estats->xxoverflow_discard,
3772                        estats->brb_truncate_discard,
3773                        old_tclient->ttl0_discard);
3774
3775                 for_each_queue(bp, i) {
3776                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3777                                bnx2x_fp(bp, i, tx_pkt),
3778                                bnx2x_fp(bp, i, rx_pkt),
3779                                bnx2x_fp(bp, i, rx_calls));
3780                 }
3781         }
3782
3783         bnx2x_hw_stats_post(bp);
3784         bnx2x_storm_stats_post(bp);
3785 }
3786
3787 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3788 {
3789         struct dmae_command *dmae;
3790         u32 opcode;
3791         int loader_idx = PMF_DMAE_C(bp);
3792         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3793
3794         bp->executer_idx = 0;
3795
3796         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3797                   DMAE_CMD_C_ENABLE |
3798                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3799 #ifdef __BIG_ENDIAN
3800                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3801 #else
3802                   DMAE_CMD_ENDIANITY_DW_SWAP |
3803 #endif
3804                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3805                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3806
3807         if (bp->port.port_stx) {
3808
3809                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3810                 if (bp->func_stx)
3811                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3812                 else
3813                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3814                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3815                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3816                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3817                 dmae->dst_addr_hi = 0;
3818                 dmae->len = sizeof(struct host_port_stats) >> 2;
3819                 if (bp->func_stx) {
3820                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3821                         dmae->comp_addr_hi = 0;
3822                         dmae->comp_val = 1;
3823                 } else {
3824                         dmae->comp_addr_lo =
3825                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3826                         dmae->comp_addr_hi =
3827                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3828                         dmae->comp_val = DMAE_COMP_VAL;
3829
3830                         *stats_comp = 0;
3831                 }
3832         }
3833
3834         if (bp->func_stx) {
3835
3836                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3837                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3839                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3840                 dmae->dst_addr_lo = bp->func_stx >> 2;
3841                 dmae->dst_addr_hi = 0;
3842                 dmae->len = sizeof(struct host_func_stats) >> 2;
3843                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3844                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3845                 dmae->comp_val = DMAE_COMP_VAL;
3846
3847                 *stats_comp = 0;
3848         }
3849 }
3850
3851 static void bnx2x_stats_stop(struct bnx2x *bp)
3852 {
3853         int update = 0;
3854
3855         bnx2x_stats_comp(bp);
3856
3857         if (bp->port.pmf)
3858                 update = (bnx2x_hw_stats_update(bp) == 0);
3859
3860         update |= (bnx2x_storm_stats_update(bp) == 0);
3861
3862         if (update) {
3863                 bnx2x_net_stats_update(bp);
3864
3865                 if (bp->port.pmf)
3866                         bnx2x_port_stats_stop(bp);
3867
3868                 bnx2x_hw_stats_post(bp);
3869                 bnx2x_stats_comp(bp);
3870         }
3871 }
3872
3873 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3874 {
3875 }
3876
3877 static const struct {
3878         void (*action)(struct bnx2x *bp);
3879         enum bnx2x_stats_state next_state;
3880 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3881 /* state        event   */
3882 {
3883 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3884 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3885 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3886 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3887 },
3888 {
3889 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3890 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3891 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3892 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3893 }
3894 };
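/* example: a STATS_EVENT_LINK_UP in STATS_STATE_DISABLED runs
   bnx2x_stats_start() and moves to STATS_STATE_ENABLED; a STATS_EVENT_STOP
   while enabled runs bnx2x_stats_stop() and disables again */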
3895
3896 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3897 {
3898         enum bnx2x_stats_state state = bp->stats_state;
3899
3900         bnx2x_stats_stm[state][event].action(bp);
3901         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3902
3903         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3904                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3905                    state, event, bp->stats_state);
3906 }
3907
3908 static void bnx2x_timer(unsigned long data)
3909 {
3910         struct bnx2x *bp = (struct bnx2x *) data;
3911
3912         if (!netif_running(bp->dev))
3913                 return;
3914
3915         if (atomic_read(&bp->intr_sem) != 0)
3916                 goto timer_restart;
3917
3918         if (poll) {
3919                 struct bnx2x_fastpath *fp = &bp->fp[0];
3920                 int rc;
3921
3922                 bnx2x_tx_int(fp, 1000);
3923                 rc = bnx2x_rx_int(fp, 1000);
3924         }
3925
3926         if (!BP_NOMCP(bp)) {
3927                 int func = BP_FUNC(bp);
3928                 u32 drv_pulse;
3929                 u32 mcp_pulse;
3930
3931                 ++bp->fw_drv_pulse_wr_seq;
3932                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3933                 /* TBD - add SYSTEM_TIME */
3934                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3935                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3936
3937                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3938                              MCP_PULSE_SEQ_MASK);
3939                 /* The delta between driver pulse and mcp response
3940                  * should be 1 (before mcp response) or 0 (after mcp response)
3941                  */
3942                 if ((drv_pulse != mcp_pulse) &&
3943                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3944                         /* someone lost a heartbeat... */
3945                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3946                                   drv_pulse, mcp_pulse);
3947                 }
3948         }
3949
3950         if ((bp->state == BNX2X_STATE_OPEN) ||
3951             (bp->state == BNX2X_STATE_DISABLED))
3952                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3953
3954 timer_restart:
3955         mod_timer(&bp->timer, jiffies + bp->current_interval);
3956 }
3957
3958 /* end of Statistics */
3959
3960 /* nic init */
3961
3962 /*
3963  * nic init service functions
3964  */
3965
3966 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3967 {
3968         int port = BP_PORT(bp);
3969
3970         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3971                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3972                         sizeof(struct ustorm_def_status_block)/4);
3973         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3974                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3975                         sizeof(struct cstorm_def_status_block)/4);
3976 }
3977
3978 static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
3979                           struct host_status_block *sb, dma_addr_t mapping)
3980 {
3981         int port = BP_PORT(bp);
3982         int func = BP_FUNC(bp);
3983         int index;
3984         u64 section;
3985
3986         /* USTORM */
3987         section = ((u64)mapping) + offsetof(struct host_status_block,
3988                                             u_status_block);
3989         sb->u_status_block.status_block_id = sb_id;
3990
3991         REG_WR(bp, BAR_USTRORM_INTMEM +
3992                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
3993         REG_WR(bp, BAR_USTRORM_INTMEM +
3994                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
3995                U64_HI(section));
3996         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
3997                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
3998
3999         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4000                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4001                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4002
4003         /* CSTORM */
4004         section = ((u64)mapping) + offsetof(struct host_status_block,
4005                                             c_status_block);
4006         sb->c_status_block.status_block_id = sb_id;
4007
4008         REG_WR(bp, BAR_CSTRORM_INTMEM +
4009                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4010         REG_WR(bp, BAR_CSTRORM_INTMEM +
4011                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4012                U64_HI(section));
4013         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4014                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4015
4016         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4017                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4018                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4019
4020         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4021 }
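     /*
      * Sketch of what bnx2x_init_sb() programs, as read from the code
      * above: each storm's section of the host status block gets its DMA
      * address split into low/high 32-bit halves, the owning function in
      * the FUNC_OFF byte, and a "disable" word per HC index, so every
      * index starts out masked until bnx2x_update_coalesce() selectively
      * enables it.  The final bnx2x_ack_sb() enables the IGU interrupt
      * for this status block.
      */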
4022
4023 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4024 {
4025         int func = BP_FUNC(bp);
4026
4027         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4028                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4029                         sizeof(struct ustorm_def_status_block)/4);
4030         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4031                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4032                         sizeof(struct cstorm_def_status_block)/4);
4033         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4034                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4035                         sizeof(struct xstorm_def_status_block)/4);
4036         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4037                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4038                         sizeof(struct tstorm_def_status_block)/4);
4039 }
4040
4041 static void bnx2x_init_def_sb(struct bnx2x *bp,
4042                               struct host_def_status_block *def_sb,
4043                               dma_addr_t mapping, int sb_id)
4044 {
4045         int port = BP_PORT(bp);
4046         int func = BP_FUNC(bp);
4047         int index, val, reg_offset;
4048         u64 section;
4049
4050         /* ATTN */
4051         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4052                                             atten_status_block);
4053         def_sb->atten_status_block.status_block_id = sb_id;
4054
4055         bp->def_att_idx = 0;
4056         bp->attn_state = 0;
4057
4058         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4059                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4060
4061         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4062                 bp->attn_group[index].sig[0] = REG_RD(bp,
4063                                                      reg_offset + 0x10*index);
4064                 bp->attn_group[index].sig[1] = REG_RD(bp,
4065                                                reg_offset + 0x4 + 0x10*index);
4066                 bp->attn_group[index].sig[2] = REG_RD(bp,
4067                                                reg_offset + 0x8 + 0x10*index);
4068                 bp->attn_group[index].sig[3] = REG_RD(bp,
4069                                                reg_offset + 0xc + 0x10*index);
4070         }
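             /*
              * Each attention group occupies four consecutive 32-bit AEU
              * enable registers, and the groups are laid out 0x10 bytes
              * apart; e.g. (illustrative) group 2's sig[1] is read from
              * reg_offset + 0x4 + 0x20.
              */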
4071
4072         bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4073                                           MISC_REG_AEU_MASK_ATTN_FUNC_0));
4074
4075         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4076                              HC_REG_ATTN_MSG0_ADDR_L);
4077
4078         REG_WR(bp, reg_offset, U64_LO(section));
4079         REG_WR(bp, reg_offset + 4, U64_HI(section));
4080
4081         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4082
4083         val = REG_RD(bp, reg_offset);
4084         val |= sb_id;
4085         REG_WR(bp, reg_offset, val);
4086
4087         /* USTORM */
4088         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4089                                             u_def_status_block);
4090         def_sb->u_def_status_block.status_block_id = sb_id;
4091
4092         bp->def_u_idx = 0;
4093
4094         REG_WR(bp, BAR_USTRORM_INTMEM +
4095                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4096         REG_WR(bp, BAR_USTRORM_INTMEM +
4097                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4098                U64_HI(section));
4099         REG_WR8(bp, BAR_USTRORM_INTMEM +  DEF_USB_FUNC_OFF +
4100                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4101         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4102                BNX2X_BTR);
4103
4104         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4105                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4106                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4107
4108         /* CSTORM */
4109         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4110                                             c_def_status_block);
4111         def_sb->c_def_status_block.status_block_id = sb_id;
4112
4113         bp->def_c_idx = 0;
4114
4115         REG_WR(bp, BAR_CSTRORM_INTMEM +
4116                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4117         REG_WR(bp, BAR_CSTRORM_INTMEM +
4118                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4119                U64_HI(section));
4120         REG_WR8(bp, BAR_CSTRORM_INTMEM +  DEF_CSB_FUNC_OFF +
4121                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4122         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4123                BNX2X_BTR);
4124
4125         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4126                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4127                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4128
4129         /* TSTORM */
4130         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4131                                             t_def_status_block);
4132         def_sb->t_def_status_block.status_block_id = sb_id;
4133
4134         bp->def_t_idx = 0;
4135
4136         REG_WR(bp, BAR_TSTRORM_INTMEM +
4137                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4138         REG_WR(bp, BAR_TSTRORM_INTMEM +
4139                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4140                U64_HI(section));
4141         REG_WR8(bp, BAR_TSTRORM_INTMEM +  DEF_TSB_FUNC_OFF +
4142                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4143         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4144                BNX2X_BTR);
4145
4146         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4148                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* XSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             x_def_status_block);
4153         def_sb->x_def_status_block.status_block_id = sb_id;
4154
4155         bp->def_x_idx = 0;
4156
4157         REG_WR(bp, BAR_XSTRORM_INTMEM +
4158                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4159         REG_WR(bp, BAR_XSTRORM_INTMEM +
4160                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4161                U64_HI(section));
4162         REG_WR8(bp, BAR_XSTRORM_INTMEM +  DEF_XSB_FUNC_OFF +
4163                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4164         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4165                BNX2X_BTR);
4166
4167         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4168                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4169                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4170
4171         bp->stats_pending = 0;
4172         bp->set_mac_pending = 0;
4173
4174         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4175 }
4176
4177 static void bnx2x_update_coalesce(struct bnx2x *bp)
4178 {
4179         int port = BP_PORT(bp);
4180         int i;
4181
4182         for_each_queue(bp, i) {
4183                 int sb_id = bp->fp[i].sb_id;
4184
4185                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4186                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4187                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4188                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
4189                         bp->rx_ticks/12);
4190                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4191                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4192                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
4193                          bp->rx_ticks ? 0 : 1);
4194
4195                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4196                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4197                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4198                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
4199                         bp->tx_ticks/12);
4200                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4201                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4202                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
4203                          bp->tx_ticks ? 0 : 1);
4204         }
4205 }
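     /*
      * bp->rx_ticks/bp->tx_ticks hold the coalescing intervals in
      * microseconds (the values exposed through ethtool); the HC timeout
      * field appears to count in 12-usec units, hence the division by 12.
      * A tick value of 0 disables the index entirely by setting the
      * per-index HC_DISABLE word instead.
      */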
4206
4207 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4208                                        struct bnx2x_fastpath *fp, int last)
4209 {
4210         int i;
4211
4212         for (i = 0; i < last; i++) {
4213                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4214                 struct sk_buff *skb = rx_buf->skb;
4215
4216                 if (skb == NULL) {
4217                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4218                         continue;
4219                 }
4220
4221                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4222                         pci_unmap_single(bp->pdev,
4223                                          pci_unmap_addr(rx_buf, mapping),
4224                                          bp->rx_buf_use_size,
4225                                          PCI_DMA_FROMDEVICE);
4226
4227                 dev_kfree_skb(skb);
4228                 rx_buf->skb = NULL;
4229         }
4230 }
4231
4232 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4233 {
4234         int func = BP_FUNC(bp);
4235         u16 ring_prod, cqe_ring_prod = 0;
4236         int i, j;
4237
4238         bp->rx_buf_use_size = bp->dev->mtu;
4239         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4240         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4241
4242         if (bp->flags & TPA_ENABLE_FLAG) {
4243                 DP(NETIF_MSG_IFUP,
4244                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4245                    bp->rx_buf_use_size, bp->rx_buf_size,
4246                    bp->dev->mtu + ETH_OVREHEAD);
4247
4248                 for_each_queue(bp, j) {
4249                         for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4250                                 struct bnx2x_fastpath *fp = &bp->fp[j];
4251
4252                                 fp->tpa_pool[i].skb =
4253                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4254                                 if (!fp->tpa_pool[i].skb) {
4255                                         BNX2X_ERR("Failed to allocate TPA "
4256                                                   "skb pool for queue[%d] - "
4257                                                   "disabling TPA on this "
4258                                                   "queue!\n", j);
4259                                         bnx2x_free_tpa_pool(bp, fp, i);
4260                                         fp->disable_tpa = 1;
4261                                         break;
4262                                 }
4263                                 /* use this queue's pool (fp), not bp->fp */
4264                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4265                                                    mapping, 0);
4266                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4267                         }
4268                 }
4269         }
4270
4271         for_each_queue(bp, j) {
4272                 struct bnx2x_fastpath *fp = &bp->fp[j];
4273
4274                 fp->rx_bd_cons = 0;
4275                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4276                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4277
4278                 /* "next page" elements initialization */
4279                 /* SGE ring */
4280                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4281                         struct eth_rx_sge *sge;
4282
4283                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4284                         sge->addr_hi =
4285                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4286                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4287                         sge->addr_lo =
4288                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4289                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4290                 }
4291
4292                 bnx2x_init_sge_ring_bit_mask(fp);
4293
4294                 /* RX BD ring */
4295                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4296                         struct eth_rx_bd *rx_bd;
4297
4298                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4299                         rx_bd->addr_hi =
4300                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4301                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4302                         rx_bd->addr_lo =
4303                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4304                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4305                 }
4306
4307                 /* CQ ring */
4308                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4309                         struct eth_rx_cqe_next_page *nextpg;
4310
4311                         nextpg = (struct eth_rx_cqe_next_page *)
4312                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4313                         nextpg->addr_hi =
4314                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4315                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4316                         nextpg->addr_lo =
4317                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4318                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4319                 }
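                      /*
                      * The three loops above chain the per-ring pages: the
                      * final entry (or entries) of each page is a "next
                      * page" pointer holding the DMA address of page
                      * i % N, so the last page wraps back to the first and
                      * the pages form one logical ring.
                      */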
4320
4321                 /* Allocate SGEs and initialize the ring elements */
4322                 for (i = 0, ring_prod = 0;
4323                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4324
4325                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4326                                 BNX2X_ERR("was only able to allocate "
4327                                           "%d rx sges\n", i);
4328                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4329                                 /* Cleanup already allocated elements */
4330                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4331                                 bnx2x_free_tpa_pool(bp, fp,
4332                                               ETH_MAX_AGGREGATION_QUEUES_E1H);
4333                                 fp->disable_tpa = 1;
4334                                 ring_prod = 0;
4335                                 break;
4336                         }
4337                         ring_prod = NEXT_SGE_IDX(ring_prod);
4338                 }
4339                 fp->rx_sge_prod = ring_prod;
4340
4341                 /* Allocate BDs and initialize BD ring */
4342                 fp->rx_comp_cons = 0;
4343                 cqe_ring_prod = ring_prod = 0;
4344                 for (i = 0; i < bp->rx_ring_size; i++) {
4345                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4346                                 BNX2X_ERR("was only able to allocate "
4347                                           "%d rx skbs\n", i);
4348                                 bp->eth_stats.rx_skb_alloc_failed++;
4349                                 break;
4350                         }
4351                         ring_prod = NEXT_RX_IDX(ring_prod);
4352                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4353                         WARN_ON(ring_prod <= i);
4354                 }
4355
4356                 fp->rx_bd_prod = ring_prod;
4357                 /* must not have more available CQEs than BDs */
4358                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4359                                        cqe_ring_prod);
4360                 fp->rx_pkt = fp->rx_calls = 0;
4361
4362                 /* Warning!
4363                  * This will generate an interrupt (to the TSTORM);
4364                  * it must only be done after the chip is initialized.
4365                  */
4366                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4367                                      fp->rx_sge_prod);
4368                 if (j != 0)
4369                         continue;
4370
4371                 REG_WR(bp, BAR_USTRORM_INTMEM +
4372                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4373                        U64_LO(fp->rx_comp_mapping));
4374                 REG_WR(bp, BAR_USTRORM_INTMEM +
4375                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4376                        U64_HI(fp->rx_comp_mapping));
4377         }
4378 }
4379
4380 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4381 {
4382         int i, j;
4383
4384         for_each_queue(bp, j) {
4385                 struct bnx2x_fastpath *fp = &bp->fp[j];
4386
4387                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4388                         struct eth_tx_bd *tx_bd =
4389                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4390
4391                         tx_bd->addr_hi =
4392                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4393                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4394                         tx_bd->addr_lo =
4395                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4396                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4397                 }
4398
4399                 fp->tx_pkt_prod = 0;
4400                 fp->tx_pkt_cons = 0;
4401                 fp->tx_bd_prod = 0;
4402                 fp->tx_bd_cons = 0;
4403                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4404                 fp->tx_pkt = 0;
4405         }
4406 }
4407
4408 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4409 {
4410         int func = BP_FUNC(bp);
4411
4412         spin_lock_init(&bp->spq_lock);
4413
4414         bp->spq_left = MAX_SPQ_PENDING;
4415         bp->spq_prod_idx = 0;
4416         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4417         bp->spq_prod_bd = bp->spq;
4418         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4419
4420         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4421                U64_LO(bp->spq_mapping));
4422         REG_WR(bp,
4423                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4424                U64_HI(bp->spq_mapping));
4425
4426         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4427                bp->spq_prod_idx);
4428 }
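     /*
      * Slowpath queue sketch, as set up above: bp->spq is a single page
      * of ramrod BDs guarded by spq_lock, with at most MAX_SPQ_PENDING
      * entries outstanding; the XSTORM is given the page base address
      * and the initial producer index so it can consume ramrods as they
      * are posted.
      */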
4429
4430 static void bnx2x_init_context(struct bnx2x *bp)
4431 {
4432         int i;
4433
4434         for_each_queue(bp, i) {
4435                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4436                 struct bnx2x_fastpath *fp = &bp->fp[i];
4437                 u8 sb_id = FP_SB_ID(fp);
4438
4439                 context->xstorm_st_context.tx_bd_page_base_hi =
4440                                                 U64_HI(fp->tx_desc_mapping);
4441                 context->xstorm_st_context.tx_bd_page_base_lo =
4442                                                 U64_LO(fp->tx_desc_mapping);
4443                 context->xstorm_st_context.db_data_addr_hi =
4444                                                 U64_HI(fp->tx_prods_mapping);
4445                 context->xstorm_st_context.db_data_addr_lo =
4446                                                 U64_LO(fp->tx_prods_mapping);
4447                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4448                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4449
4450                 context->ustorm_st_context.common.sb_index_numbers =
4451                                                 BNX2X_RX_SB_INDEX_NUM;
4452                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4453                 context->ustorm_st_context.common.status_block_id = sb_id;
4454                 context->ustorm_st_context.common.flags =
4455                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4456                 context->ustorm_st_context.common.mc_alignment_size = 64;
4457                 context->ustorm_st_context.common.bd_buff_size =
4458                                                 bp->rx_buf_use_size;
4459                 context->ustorm_st_context.common.bd_page_base_hi =
4460                                                 U64_HI(fp->rx_desc_mapping);
4461                 context->ustorm_st_context.common.bd_page_base_lo =
4462                                                 U64_LO(fp->rx_desc_mapping);
4463                 if (!fp->disable_tpa) {
4464                         context->ustorm_st_context.common.flags |=
4465                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4466                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4467                         context->ustorm_st_context.common.sge_buff_size =
4468                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4469                         context->ustorm_st_context.common.sge_page_base_hi =
4470                                                 U64_HI(fp->rx_sge_mapping);
4471                         context->ustorm_st_context.common.sge_page_base_lo =
4472                                                 U64_LO(fp->rx_sge_mapping);
4473                 }
4474
4475                 context->cstorm_st_context.sb_index_number =
4476                                                 HC_INDEX_C_ETH_TX_CQ_CONS;
4477                 context->cstorm_st_context.status_block_id = sb_id;
4478
4479                 context->xstorm_ag_context.cdu_reserved =
4480                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4481                                                CDU_REGION_NUMBER_XCM_AG,
4482                                                ETH_CONNECTION_TYPE);
4483                 context->ustorm_ag_context.cdu_usage =
4484                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4485                                                CDU_REGION_NUMBER_UCM_AG,
4486                                                ETH_CONNECTION_TYPE);
4487         }
4488 }
4489
4490 static void bnx2x_init_ind_table(struct bnx2x *bp)
4491 {
4492         int port = BP_PORT(bp);
4493         int i;
4494
4495         if (!is_multi(bp))
4496                 return;
4497
4498         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4499         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4500                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4501                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4502                         i % bp->num_queues);
4503
4504         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4505 }
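     /*
      * Illustrative example (not from the source): with num_queues = 4
      * the table reads 0,1,2,3,0,1,2,3,... so the TSTORM spreads RSS
      * hash results round-robin across the active queues.
      */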
4506
4507 static void bnx2x_set_client_config(struct bnx2x *bp)
4508 {
4509         struct tstorm_eth_client_config tstorm_client = {0};
4510         int port = BP_PORT(bp);
4511         int i;
4512
4513         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4514         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4515         tstorm_client.config_flags =
4516                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4517 #ifdef BCM_VLAN
4518         if (bp->rx_mode && bp->vlgrp) {
4519                 tstorm_client.config_flags |=
4520                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4521                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4522         }
4523 #endif
4524
4525         if (bp->flags & TPA_ENABLE_FLAG) {
4526                 tstorm_client.max_sges_for_packet =
4527                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4528                 tstorm_client.max_sges_for_packet =
4529                         ((tstorm_client.max_sges_for_packet +
4530                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4531                         PAGES_PER_SGE_SHIFT;
4532
4533                 tstorm_client.config_flags |=
4534                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4535         }
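             /*
              * Worked example (illustrative, assuming 4K pages and
              * PAGES_PER_SGE = 2): an MTU of 9618 spans
              * ceil(9618/4096) = 3 pages, rounded up to 4 whole SGEs'
              * worth and divided by 2, giving max_sges_for_packet = 2.
              */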
4536
4537         for_each_queue(bp, i) {
4538                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4539                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4540                        ((u32 *)&tstorm_client)[0]);
4541                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4542                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4543                        ((u32 *)&tstorm_client)[1]);
4544         }
4545
4546         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4547            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4548 }
4549
4550 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4551 {
4552         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4553         int mode = bp->rx_mode;
4554         int mask = (1 << BP_L_ID(bp));
4555         int func = BP_FUNC(bp);
4556         int i;
4557
4558         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4559
4560         switch (mode) {
4561         case BNX2X_RX_MODE_NONE: /* no Rx */
4562                 tstorm_mac_filter.ucast_drop_all = mask;
4563                 tstorm_mac_filter.mcast_drop_all = mask;
4564                 tstorm_mac_filter.bcast_drop_all = mask;
4565                 break;
4566         case BNX2X_RX_MODE_NORMAL:
4567                 tstorm_mac_filter.bcast_accept_all = mask;
4568                 break;
4569         case BNX2X_RX_MODE_ALLMULTI:
4570                 tstorm_mac_filter.mcast_accept_all = mask;
4571                 tstorm_mac_filter.bcast_accept_all = mask;
4572                 break;
4573         case BNX2X_RX_MODE_PROMISC:
4574                 tstorm_mac_filter.ucast_accept_all = mask;
4575                 tstorm_mac_filter.mcast_accept_all = mask;
4576                 tstorm_mac_filter.bcast_accept_all = mask;
4577                 break;
4578         default:
4579                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4580                 break;
4581         }
4582
4583         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4584                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4585                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4586                        ((u32 *)&tstorm_mac_filter)[i]);
4587
4588 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4589                    ((u32 *)&tstorm_mac_filter)[i]); */
4590         }
4591
4592         if (mode != BNX2X_RX_MODE_NONE)
4593                 bnx2x_set_client_config(bp);
4594 }
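     /*
      * The filter fields are per-client bit masks; mask = 1 << BP_L_ID(bp)
      * selects only this function's leading client, so e.g. promiscuous
      * mode sets that one bit in the three accept_all fields without
      * disturbing clients that belong to other functions.
      */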
4595
4596 static void bnx2x_init_internal_common(struct bnx2x *bp)
4597 {
4598         int i;
4599
4600         /* Zero this manually as its initialization is
4601            currently missing in the initTool */
4602         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4603                 REG_WR(bp, BAR_USTRORM_INTMEM +
4604                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4605 }
4606
4607 static void bnx2x_init_internal_port(struct bnx2x *bp)
4608 {
4609         int port = BP_PORT(bp);
4610
4611         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4612         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4613         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4614         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4615 }
4616
4617 static void bnx2x_init_internal_func(struct bnx2x *bp)
4618 {
4619         struct tstorm_eth_function_common_config tstorm_config = {0};
4620         struct stats_indication_flags stats_flags = {0};
4621         int port = BP_PORT(bp);
4622         int func = BP_FUNC(bp);
4623         int i;
4624         u16 max_agg_size;
4625
4626         if (is_multi(bp)) {
4627                 tstorm_config.config_flags = MULTI_FLAGS;
4628                 tstorm_config.rss_result_mask = MULTI_MASK;
4629         }
4630
4631         tstorm_config.leading_client_id = BP_L_ID(bp);
4632
4633         REG_WR(bp, BAR_TSTRORM_INTMEM +
4634                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4635                (*(u32 *)&tstorm_config));
4636
4637         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4638         bnx2x_set_storm_rx_mode(bp);
4639
4640         /* reset xstorm per client statistics */
4641         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4642                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4643                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4644                        i*4, 0);
4645         }
4646         /* reset tstorm per client statistics */
4647         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4648                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4649                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4650                        i*4, 0);
4651         }
4652
4653         /* Init statistics related context */
4654         stats_flags.collect_eth = 1;
4655
4656         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4657                ((u32 *)&stats_flags)[0]);
4658         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4659                ((u32 *)&stats_flags)[1]);
4660
4661         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4662                ((u32 *)&stats_flags)[0]);
4663         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4664                ((u32 *)&stats_flags)[1]);
4665
4666         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4667                ((u32 *)&stats_flags)[0]);
4668         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4669                ((u32 *)&stats_flags)[1]);
4670
4671         REG_WR(bp, BAR_XSTRORM_INTMEM +
4672                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4673                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4674         REG_WR(bp, BAR_XSTRORM_INTMEM +
4675                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4676                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4677
4678         REG_WR(bp, BAR_TSTRORM_INTMEM +
4679                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4680                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4681         REG_WR(bp, BAR_TSTRORM_INTMEM +
4682                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4683                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4684
4685         if (CHIP_IS_E1H(bp)) {
4686                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4687                         IS_E1HMF(bp));
4688                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4689                         IS_E1HMF(bp));
4690                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4691                         IS_E1HMF(bp));
4692                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4693                         IS_E1HMF(bp));
4694
4695                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4696                          bp->e1hov);
4697         }
4698
4699         /* Init CQ ring mapping and aggregation size */
4700         max_agg_size = min((u32)(bp->rx_buf_use_size +
4701                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4702                            (u32)0xffff);
4703         for_each_queue(bp, i) {
4704                 struct bnx2x_fastpath *fp = &bp->fp[i];
4705
4706                 REG_WR(bp, BAR_USTRORM_INTMEM +
4707                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4708                        U64_LO(fp->rx_comp_mapping));
4709                 REG_WR(bp, BAR_USTRORM_INTMEM +
4710                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4711                        U64_HI(fp->rx_comp_mapping));
4712
4713                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4714                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4715                          max_agg_size);
4716         }
4717 }
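     /*
      * max_agg_size sketch: a TPA aggregation can span the regular rx
      * buffer plus 8 SGEs; assuming 4K pages and PAGES_PER_SGE = 2 that
      * comes to rx_buf_use_size + 64K, just above the 0xffff that fits
      * in the 16-bit firmware field written above, so the min() clamps
      * the value.
      */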
4718
4719 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4720 {
4721         switch (load_code) {
4722         case FW_MSG_CODE_DRV_LOAD_COMMON:
4723                 bnx2x_init_internal_common(bp);
4724                 /* no break */
4725
4726         case FW_MSG_CODE_DRV_LOAD_PORT:
4727                 bnx2x_init_internal_port(bp);
4728                 /* no break */
4729
4730         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4731                 bnx2x_init_internal_func(bp);
4732                 break;
4733
4734         default:
4735                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4736                 break;
4737         }
4738 }
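     /*
      * Note that the switch above falls through on purpose: a COMMON load
      * also runs the PORT and FUNCTION stages, a PORT load also runs the
      * FUNCTION stage, and a plain FUNCTION load runs only its own stage.
      */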
4739
4740 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4741 {
4742         int i;
4743
4744         for_each_queue(bp, i) {
4745                 struct bnx2x_fastpath *fp = &bp->fp[i];
4746
4747                 fp->bp = bp;
4748                 fp->state = BNX2X_FP_STATE_CLOSED;
4749                 fp->index = i;
4750                 fp->cl_id = BP_L_ID(bp) + i;
4751                 fp->sb_id = fp->cl_id;
4752                 DP(NETIF_MSG_IFUP,
4753                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4754                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4755                 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4756                               fp->status_blk_mapping);
4757         }
4758
4759         bnx2x_init_def_sb(bp, bp->def_status_blk,
4760                           bp->def_status_blk_mapping, DEF_SB_ID);
4761         bnx2x_update_coalesce(bp);
4762         bnx2x_init_rx_rings(bp);
4763         bnx2x_init_tx_ring(bp);
4764         bnx2x_init_sp_ring(bp);
4765         bnx2x_init_context(bp);
4766         bnx2x_init_internal(bp, load_code);
4767         bnx2x_init_ind_table(bp);
4768         bnx2x_int_enable(bp);
4769 }
4770
4771 /* end of nic init */
4772
4773 /*
4774  * gzip service functions
4775  */
4776
4777 static int bnx2x_gunzip_init(struct bnx2x *bp)
4778 {
4779         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4780                                               &bp->gunzip_mapping);
4781         if (bp->gunzip_buf  == NULL)
4782                 goto gunzip_nomem1;
4783
4784         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4785         if (bp->strm  == NULL)
4786                 goto gunzip_nomem2;
4787
4788         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4789                                       GFP_KERNEL);
4790         if (bp->strm->workspace == NULL)
4791                 goto gunzip_nomem3;
4792
4793         return 0;
4794
4795 gunzip_nomem3:
4796         kfree(bp->strm);
4797         bp->strm = NULL;
4798
4799 gunzip_nomem2:
4800         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4801                             bp->gunzip_mapping);
4802         bp->gunzip_buf = NULL;
4803
4804 gunzip_nomem1:
4805         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4806                " decompression\n", bp->dev->name);
4807         return -ENOMEM;
4808 }
4809
4810 static void bnx2x_gunzip_end(struct bnx2x *bp)
4811 {
4812         kfree(bp->strm->workspace);
4813
4814         kfree(bp->strm);
4815         bp->strm = NULL;
4816
4817         if (bp->gunzip_buf) {
4818                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4819                                     bp->gunzip_mapping);
4820                 bp->gunzip_buf = NULL;
4821         }
4822 }
4823
4824 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4825 {
4826         int n, rc;
4827
4828         /* check gzip header */
4829         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4830                 return -EINVAL;
4831
4832         n = 10;
4833
4834 #define FNAME                           0x8
4835
4836         if (zbuf[3] & FNAME)
4837                 while ((zbuf[n++] != 0) && (n < len));
4838
4839         bp->strm->next_in = zbuf + n;
4840         bp->strm->avail_in = len - n;
4841         bp->strm->next_out = bp->gunzip_buf;
4842         bp->strm->avail_out = FW_BUF_SIZE;
4843
4844         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4845         if (rc != Z_OK)
4846                 return rc;
4847
4848         rc = zlib_inflate(bp->strm, Z_FINISH);
4849         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4850                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4851                        bp->dev->name, bp->strm->msg);
4852
4853         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4854         if (bp->gunzip_outlen & 0x3)
4855                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4856                                     " gunzip_outlen (%d) not aligned\n",
4857                        bp->dev->name, bp->gunzip_outlen);
4858         bp->gunzip_outlen >>= 2;
4859
4860         zlib_inflateEnd(bp->strm);
4861
4862         if (rc == Z_STREAM_END)
4863                 return 0;
4864
4865         return rc;
4866 }
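     /*
      * Note on the zlib usage above: passing -MAX_WBITS to
      * zlib_inflateInit2() requests a raw deflate stream, which is why
      * the gzip header (the 0x1f 0x8b magic and the optional
      * NUL-terminated FNAME field) is skipped by hand before inflating.
      */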
4867
4868 /* nic load/unload */
4869
4870 /*
4871  * General service functions
4872  */
4873
4874 /* send a NIG loopback debug packet */
4875 static void bnx2x_lb_pckt(struct bnx2x *bp)
4876 {
4877         u32 wb_write[3];
4878
4879         /* Ethernet source and destination addresses */
4880         wb_write[0] = 0x55555555;
4881         wb_write[1] = 0x55555555;
4882         wb_write[2] = 0x20;             /* SOP */
4883         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4884
4885         /* NON-IP protocol */
4886         wb_write[0] = 0x09000000;
4887         wb_write[1] = 0x55555555;
4888         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4889         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4890 }
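     /*
      * Each REG_WR_DMAE above emits one 16-byte fragment of a debug
      * packet; the third word carries the control flags (0x20 = SOP,
      * 0x10 = EOP), so the two writes together inject a single minimal
      * frame into the NIG loopback path for the memory test below.
      */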
4891
4892 /* Some of the internal memories are not directly readable
4893  * from the driver, so to test them we send debug packets
4894  * through the NIG loopback path.
4895  */
4896 static int bnx2x_int_mem_test(struct bnx2x *bp)
4897 {
4898         int factor;
4899         int count, i;
4900         u32 val = 0;
4901
4902         if (CHIP_REV_IS_FPGA(bp))
4903                 factor = 120;
4904         else if (CHIP_REV_IS_EMUL(bp))
4905                 factor = 200;
4906         else
4907                 factor = 1;
4908
4909         DP(NETIF_MSG_HW, "start part1\n");
4910
4911         /* Disable inputs of parser neighbor blocks */
4912         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4913         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4914         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4915         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4916
4917         /*  Write 0 to parser credits for CFC search request */
4918         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4919
4920         /* send Ethernet packet */
4921         bnx2x_lb_pckt(bp);
4922
4923         /* TODO: should the NIG statistics be reset here? */
4924         /* Wait until NIG register shows 1 packet of size 0x10 */
4925         count = 1000 * factor;
4926         while (count) {
4927
4928                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4929                 val = *bnx2x_sp(bp, wb_data[0]);
4930                 if (val == 0x10)
4931                         break;
4932
4933                 msleep(10);
4934                 count--;
4935         }
4936         if (val != 0x10) {
4937                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4938                 return -1;
4939         }
4940
4941         /* Wait until PRS register shows 1 packet */
4942         count = 1000 * factor;
4943         while (count) {
4944                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4945                 if (val == 1)
4946                         break;
4947
4948                 msleep(10);
4949                 count--;
4950         }
4951         if (val != 0x1) {
4952                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4953                 return -2;
4954         }
4955
4956         /* Reset and init BRB, PRS */
4957         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4958         msleep(50);
4959         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4960         msleep(50);
4961         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4962         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4963
4964         DP(NETIF_MSG_HW, "part2\n");
4965
4966         /* Disable inputs of parser neighbor blocks */
4967         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4968         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4969         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4970         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4971
4972         /* Write 0 to parser credits for CFC search request */
4973         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4974
4975         /* send 10 Ethernet packets */
4976         for (i = 0; i < 10; i++)
4977                 bnx2x_lb_pckt(bp);
4978
4979         /* Wait until NIG register shows 10 + 1
4980            packets of size 11*0x10 = 0xb0 */
4981         count = 1000 * factor;
4982         while (count) {
4983
4984                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4985                 val = *bnx2x_sp(bp, wb_data[0]);
4986                 if (val == 0xb0)
4987                         break;
4988
4989                 msleep(10);
4990                 count--;
4991         }
4992         if (val != 0xb0) {
4993                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4994                 return -3;
4995         }
4996
4997         /* Wait until PRS register shows 2 packets */
4998         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4999         if (val != 2)
5000                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5001
5002         /* Write 1 to parser credits for CFC search request */
5003         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5004
5005         /* Wait until PRS register shows 3 packets */
5006         msleep(10 * factor);
5007         /* the PRS packet counter should now read 3 */
5008         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5009         if (val != 3)
5010                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5011
5012         /* clear NIG EOP FIFO */
5013         for (i = 0; i < 11; i++)
5014                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5015         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5016         if (val != 1) {
5017                 BNX2X_ERR("clear of NIG failed\n");
5018                 return -4;
5019         }
5020
5021         /* Reset and init BRB, PRS, NIG */
5022         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5023         msleep(50);
5024         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5025         msleep(50);
5026         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5027         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5028 #ifndef BCM_ISCSI
5029         /* set NIC mode */
5030         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5031 #endif
5032
5033         /* Enable inputs of parser neighbor blocks */
5034         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5035         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5036         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5037         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5038
5039         DP(NETIF_MSG_HW, "done\n");
5040
5041         return 0; /* OK */
5042 }
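     /*
      * The negative return codes identify the failing stage: -1/-2 for
      * the single-packet NIG/PRS checks of part 1, -3 for the ten-packet
      * NIG check of part 2 and -4 for the NIG EOP FIFO drain.
      */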
5043
5044 static void enable_blocks_attention(struct bnx2x *bp)
5045 {
5046         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5047         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5048         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5049         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5050         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5051         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5052         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5053         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5054         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5055 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5056 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5057         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5058         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5059         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5060 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5061 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5062         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5063         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5064         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5065         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5066 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5067 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5068         if (CHIP_REV_IS_FPGA(bp))
5069                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5070         else
5071                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5072         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5073         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5074         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5075 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5076 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5077         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5078         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5079 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5080         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5081 }
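     /*
      * Writing 0 to a block's *_INT_MASK register unmasks all of its
      * attention bits; PBF is the one exception, where 0x18 keeps bits 3
      * and 4 masked.
      */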
5082
5083
5084 static int bnx2x_init_common(struct bnx2x *bp)
5085 {
5086         u32 val, i;
5087
5088         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5089
5090         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5091         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5092
5093         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5094         if (CHIP_IS_E1H(bp))
5095                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5096
5097         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5098         msleep(30);
5099         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5100
5101         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5102         if (CHIP_IS_E1(bp)) {
5103                 /* enable HW interrupt from PXP on USDM overflow
5104                    bit 16 on INT_MASK_0 */
5105                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5106         }
5107
5108         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5109         bnx2x_init_pxp(bp);
5110
5111 #ifdef __BIG_ENDIAN
5112         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5113         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5114         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5115         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5116         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5117         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5118
5119 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5120         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5121         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5122         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5123         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5124 #endif
5125
5126 #ifndef BCM_ISCSI
5127                 /* set NIC mode */
5128                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5129 #endif
5130
5131         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5132 #ifdef BCM_ISCSI
5133         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5134         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5135         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5136 #endif
5137
5138         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5139                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5140
5141         /* let the HW do its magic ... */
5142         msleep(100);
5143         /* finish PXP init */
5144         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5145         if (val != 1) {
5146                 BNX2X_ERR("PXP2 CFG failed\n");
5147                 return -EBUSY;
5148         }
5149         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5150         if (val != 1) {
5151                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5152                 return -EBUSY;
5153         }
5154
5155         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5156         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5157
5158         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5159
5160         /* clean the DMAE memory */
5161         bp->dmae_ready = 1;
5162         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5163
5164         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5165         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5166         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5167         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5168
5169         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5170         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5171         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5172         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5173
5174         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5175         /* soft reset pulse */
5176         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5177         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5178
5179 #ifdef BCM_ISCSI
5180         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5181 #endif
5182
5183         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5184         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5185         if (!CHIP_REV_IS_SLOW(bp)) {
5186                 /* enable hw interrupt from doorbell Q */
5187                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5188         }
5189
5190         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5191         if (CHIP_REV_IS_SLOW(bp)) {
5192                 /* fix for emulation and FPGA: no pause */
5193                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5194                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5195                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5196                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5197         }
5198
5199         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5200         if (CHIP_IS_E1H(bp))
5201                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5202
5203         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5204         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5205         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5206         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5207
5208         if (CHIP_IS_E1H(bp)) {
5209                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5210                                 STORM_INTMEM_SIZE_E1H/2);
5211                 bnx2x_init_fill(bp,
5212                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5213                                 0, STORM_INTMEM_SIZE_E1H/2);
5214                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5215                                 STORM_INTMEM_SIZE_E1H/2);
5216                 bnx2x_init_fill(bp,
5217                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5218                                 0, STORM_INTMEM_SIZE_E1H/2);
5219                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5220                                 STORM_INTMEM_SIZE_E1H/2);
5221                 bnx2x_init_fill(bp,
5222                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5223                                 0, STORM_INTMEM_SIZE_E1H/2);
5224                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5225                                 STORM_INTMEM_SIZE_E1H/2);
5226                 bnx2x_init_fill(bp,
5227                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5228                                 0, STORM_INTMEM_SIZE_E1H/2);
5229         } else { /* E1 */
5230                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5231                                 STORM_INTMEM_SIZE_E1);
5232                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5233                                 STORM_INTMEM_SIZE_E1);
5234                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5235                                 STORM_INTMEM_SIZE_E1);
5236                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5237                                 STORM_INTMEM_SIZE_E1);
5238         }
5239
5240         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5241         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5242         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5243         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5244
5245         /* sync semi rtc */
5246         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5247                0x80000000);
5248         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5249                0x80000000);
5250
5251         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5252         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5253         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5254
5255         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5256         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5257                 REG_WR(bp, i, 0xc0cac01a);
5258                 /* TODO: replace with something meaningful */
5259         }
5260         if (CHIP_IS_E1H(bp))
5261                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5262         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5263
5264         if (sizeof(union cdu_context) != 1024)
5265                 /* we currently assume that a context is 1024 bytes */
5266                 printk(KERN_ALERT PFX "please adjust the size of"
5267                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5268
5269         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5270         val = (4 << 24) + (0 << 12) + 1024;
5271         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5272         if (CHIP_IS_E1(bp)) {
5273                 /* !!! fix PXP client credit until excel update */
5274                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5275                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5276         }
5277
5278         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5279         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5280
5281         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5282         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5283
5284         /* PXPCS COMMON comes here */
5285         /* Reset PCIE errors for debug */
5286         REG_WR(bp, 0x2814, 0xffffffff);
5287         REG_WR(bp, 0x3820, 0xffffffff);
5288
5289         /* EMAC0 COMMON comes here */
5290         /* EMAC1 COMMON comes here */
5291         /* DBU COMMON comes here */
5292         /* DBG COMMON comes here */
5293
5294         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5295         if (CHIP_IS_E1H(bp)) {
5296                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5297                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5298         }
5299
5300         if (CHIP_REV_IS_SLOW(bp))
5301                 msleep(200);
5302
5303         /* finish CFC init */
5304         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5305         if (val != 1) {
5306                 BNX2X_ERR("CFC LL_INIT failed\n");
5307                 return -EBUSY;
5308         }
5309         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5310         if (val != 1) {
5311                 BNX2X_ERR("CFC AC_INIT failed\n");
5312                 return -EBUSY;
5313         }
5314         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5315         if (val != 1) {
5316                 BNX2X_ERR("CFC CAM_INIT failed\n");
5317                 return -EBUSY;
5318         }
5319         REG_WR(bp, CFC_REG_DEBUG0, 0);
5320
5321         /* read NIG statistic
5322            to see if this is our first up since powerup */
5323         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5324         val = *bnx2x_sp(bp, wb_data[0]);
5325
5326         /* do internal memory self test */
5327         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5328                 BNX2X_ERR("internal mem self test failed\n");
5329                 return -EBUSY;
5330         }
5331
5332         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5333         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5334                 /* Fan failure is indicated by SPIO 5 */
5335                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5336                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5337
5338                 /* set to active low mode */
5339                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5340                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5341                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5342                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5343
5344                 /* enable interrupt to signal the IGU */
5345                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5346                 val |= (1 << MISC_REGISTERS_SPIO_5);
5347                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5348                 break;
5349
5350         default:
5351                 break;
5352         }
5353
5354         /* clear PXP2 attentions */
5355         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5356
5357         enable_blocks_attention(bp);
5358
5359         if (bp->flags & TPA_ENABLE_FLAG) {
5360                 struct tstorm_eth_tpa_exist tmp = {0};
5361
5362                 tmp.tpa_exist = 1;
5363
5364                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5365                        ((u32 *)&tmp)[0]);
5366                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5367                        ((u32 *)&tmp)[1]);
5368         }
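        /* Editor's note: the two REG_WR()s above copy the (presumably
         * 8-byte) struct tstorm_eth_tpa_exist into TSTORM internal
         * memory one 32-bit word at a time; this is how the driver
         * publishes its TPA (LRO) capability to the firmware.
         */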
5369
5370         return 0;
5371 }
5372
5373 static int bnx2x_init_port(struct bnx2x *bp)
5374 {
5375         int port = BP_PORT(bp);
5376         u32 val;
#ifdef BCM_ISCSI
        /* locals used only by the compiled-out iSCSI blocks below; the
         * ILT lines are 1-3 for port 0 and 385-387 for port 1 (the
         * initial value of i is an editor's assumption from those
         * comments) */
        int func = BP_FUNC(bp);
        int i = port ? 384 : 0;
        u32 wb_write[2];
#endif
5377
5378         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5379
5380         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5381
5382         /* Port PXP comes here */
5383         /* Port PXP2 comes here */
5384 #ifdef BCM_ISCSI
5385         /* Port0  1
5386          * Port1  385 */
5387         i++;
5388         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5389         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5390         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5391         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5392
5393         /* Port0  2
5394          * Port1  386 */
5395         i++;
5396         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5397         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5398         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5399         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5400
5401         /* Port0  3
5402          * Port1  387 */
5403         i++;
5404         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5405         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5406         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5407         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5408 #endif
5409         /* Port CMs come here */
5410
5411         /* Port QM comes here */
5412 #ifdef BCM_ISCSI
5413         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5414         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5415
5416         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5417                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5418 #endif
5419         /* Port DQ comes here */
5420         /* Port BRB1 comes here */
5421         /* Port PRS comes here */
5422         /* Port TSDM comes here */
5423         /* Port CSDM comes here */
5424         /* Port USDM comes here */
5425         /* Port XSDM comes here */
5426         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5427                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5428         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5429                              port ? USEM_PORT1_END : USEM_PORT0_END);
5430         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5431                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5432         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5433                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5434         /* Port UPB comes here */
5435         /* Port XPB comes here */
5436
5437         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5438                              port ? PBF_PORT1_END : PBF_PORT0_END);
5439
5440         /* configure PBF to work without PAUSE, MTU 9000 */
5441         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5442
5443         /* update threshold */
5444         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5445         /* update init credit */
5446         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
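        /* Editor's worked numbers, assuming the 16-byte units implied by
         * the /16 scaling above: a 9040-byte frame gives an arbitration
         * threshold of 9040/16 = 565 units, and the initial credit is
         * 565 + 553 - 22 = 1096 units.
         */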
5447
5448         /* probe changes */
5449         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5450         msleep(5);
5451         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5452
5453 #ifdef BCM_ISCSI
5454         /* tell the searcher where the T2 table is */
5455         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5456
5457         wb_write[0] = U64_LO(bp->t2_mapping);
5458         wb_write[1] = U64_HI(bp->t2_mapping);
5459         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5460         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5461         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5462         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5463
5464         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5465         /* Port SRCH comes here */
5466 #endif
5467         /* Port CDU comes here */
5468         /* Port CFC comes here */
5469
5470         if (CHIP_IS_E1(bp)) {
5471                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5472                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5473         }
5474         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5475                              port ? HC_PORT1_END : HC_PORT0_END);
5476
5477         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5478                                     MISC_AEU_PORT0_START,
5479                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5480         /* init aeu_mask_attn_func_0/1:
5481          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5482          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5483          *             bits 4-7 are used for "per vn group attention" */
5484         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5485                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5486
5487         /* Port PXPCS comes here */
5488         /* Port EMAC0 comes here */
5489         /* Port EMAC1 comes here */
5490         /* Port DBU comes here */
5491         /* Port DBG comes here */
5492         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5493                              port ? NIG_PORT1_END : NIG_PORT0_END);
5494
5495         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5496
5497         if (CHIP_IS_E1H(bp)) {
5498                 u32 wsum;
5499                 struct cmng_struct_per_port m_cmng_port;
5500                 int vn;
5501
5502                 /* 0x2 disable e1hov, 0x1 enable */
5503                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5504                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5505
5506                 /* Init RATE SHAPING and FAIRNESS contexts.
5507                    Initialize as if there is 10G link. */
5508                 wsum = bnx2x_calc_vn_wsum(bp);
5509                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5510                 if (IS_E1HMF(bp))
5511                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5512                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5513                                         wsum, 10000, &m_cmng_port);
5514         }
5515
5516         /* Port MCP comes here */
5517         /* Port DMAE comes here */
5518
5519         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5520         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5521                 /* add SPIO 5 to group 0 */
5522                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5523                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5524                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5525                 break;
5526
5527         default:
5528                 break;
5529         }
5530
5531         bnx2x__link_reset(bp);
5532
5533         return 0;
5534 }
5535
5536 #define ILT_PER_FUNC            (768/2)
5537 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5538 /* the phys address is shifted right 12 bits and a 1=valid bit
5539    is added at the 53rd bit;
5540    then, since this is a wide register(TM),
5541    we split it into two 32-bit writes
5542  */
5543 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5544 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5545 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5546 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
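/* Editor's sketch: a worked example of the macros above, assuming the
 * 64-bit DMA address 0x0000001234567000:
 *   ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *   ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100000
 * i.e. the low word carries address bits 12-43 and the high word carries
 * bits 44+ plus the valid bit (bit 52 of the combined value - the "53rd
 * bit" of the comment above). Likewise PXP_ONE_ILT(5) = (5 << 10) | 5 =
 * 0x1405 encodes an ILT range whose first and last lines are both 5.
 */
#if 0   /* illustrative only - never compiled; 'line' and 'mapping' are
           hypothetical locals */
        bnx2x_wb_wr(bp, PXP2_REG_RQ_ONCHIP_AT + line*8,
                    ONCHIP_ADDR1(mapping), ONCHIP_ADDR2(mapping));
#endif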
5547
5548 #define CNIC_ILT_LINES          0
5549
5550 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5551 {
5552         int reg;
5553
5554         if (CHIP_IS_E1H(bp))
5555                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5556         else /* E1 */
5557                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5558
5559         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5560 }
5561
5562 static int bnx2x_init_func(struct bnx2x *bp)
5563 {
5564         int port = BP_PORT(bp);
5565         int func = BP_FUNC(bp);
5566         int i;
5567
5568         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5569
5570         i = FUNC_ILT_BASE(func);
5571
5572         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5573         if (CHIP_IS_E1H(bp)) {
5574                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5575                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5576         } else /* E1 */
5577                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5578                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5579
5580
5581         if (CHIP_IS_E1H(bp)) {
5582                 for (i = 0; i < 9; i++)
5583                         bnx2x_init_block(bp,
5584                                          cm_start[func][i], cm_end[func][i]);
5585
5586                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5587                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5588         }
5589
5590         /* HC init per function */
5591         if (CHIP_IS_E1H(bp)) {
5592                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5593
5594                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5595                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5596         }
5597         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5598
5599         if (CHIP_IS_E1H(bp))
5600                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5601
5602         /* Reset PCIE errors for debug */
5603         REG_WR(bp, 0x2114, 0xffffffff);
5604         REG_WR(bp, 0x2120, 0xffffffff);
5605
5606         return 0;
5607 }
5608
5609 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5610 {
5611         int i, rc = 0;
5612
5613         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5614            BP_FUNC(bp), load_code);
5615
5616         bp->dmae_ready = 0;
5617         mutex_init(&bp->dmae_mutex);
5618         bnx2x_gunzip_init(bp);
5619
5620         switch (load_code) {
5621         case FW_MSG_CODE_DRV_LOAD_COMMON:
5622                 rc = bnx2x_init_common(bp);
5623                 if (rc)
5624                         goto init_hw_err;
5625                 /* no break */
5626
5627         case FW_MSG_CODE_DRV_LOAD_PORT:
5628                 bp->dmae_ready = 1;
5629                 rc = bnx2x_init_port(bp);
5630                 if (rc)
5631                         goto init_hw_err;
5632                 /* no break */
5633
5634         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5635                 bp->dmae_ready = 1;
5636                 rc = bnx2x_init_func(bp);
5637                 if (rc)
5638                         goto init_hw_err;
5639                 break;
5640
5641         default:
5642                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5643                 break;
5644         }
5645
5646         if (!BP_NOMCP(bp)) {
5647                 int func = BP_FUNC(bp);
5648
5649                 bp->fw_drv_pulse_wr_seq =
5650                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5651                                  DRV_PULSE_SEQ_MASK);
5652                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5653                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5654                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5655         } else
5656                 bp->func_stx = 0;
5657
5658         /* this needs to be done before gunzip end */
5659         bnx2x_zero_def_sb(bp);
5660         for_each_queue(bp, i)
5661                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5662
5663 init_hw_err:
5664         bnx2x_gunzip_end(bp);
5665
5666         return rc;
5667 }
5668
5669 /* send the MCP a request, block until there is a reply */
5670 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5671 {
5672         int func = BP_FUNC(bp);
5673         u32 seq = ++bp->fw_seq;
5674         u32 rc = 0;
5675         u32 cnt = 1;
5676         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5677
5678         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5679         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5680
5681         do {
5682                 /* let the FW do its magic ... */
5683                 msleep(delay);
5684
5685                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5686
5687                 /* Give the FW up to 2 seconds (200*10ms) */
5688         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5689
5690         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5691            cnt*delay, rc, seq);
5692
5693         /* is this a reply to our command? */
5694         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5695                 rc &= FW_MSG_CODE_MASK;
5696
5697         } else {
5698                 /* FW BUG! */
5699                 BNX2X_ERR("FW failed to respond!\n");
5700                 bnx2x_fw_dump(bp);
5701                 rc = 0;
5702         }
5703
5704         return rc;
5705 }
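/* Editor's note on the timeout above: delay is 10ms on real silicon and
 * 100ms on slow (emulated) chips, so the 200-poll loop bounds the wait
 * at roughly 2 seconds or 20 seconds respectively.
 */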
5706
5707 static void bnx2x_free_mem(struct bnx2x *bp)
5708 {
5709
5710 #define BNX2X_PCI_FREE(x, y, size) \
5711         do { \
5712                 if (x) { \
5713                         pci_free_consistent(bp->pdev, size, x, y); \
5714                         x = NULL; \
5715                         y = 0; \
5716                 } \
5717         } while (0)
5718
5719 #define BNX2X_FREE(x) \
5720         do { \
5721                 if (x) { \
5722                         vfree(x); \
5723                         x = NULL; \
5724                 } \
5725         } while (0)
5726
5727         int i;
5728
5729         /* fastpath */
5730         for_each_queue(bp, i) {
5731
5732                 /* Status blocks */
5733                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5734                                bnx2x_fp(bp, i, status_blk_mapping),
5735                                sizeof(struct host_status_block) +
5736                                sizeof(struct eth_tx_db_data));
5737
5738                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5739                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5740                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5741                                bnx2x_fp(bp, i, tx_desc_mapping),
5742                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5743
5744                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5745                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5746                                bnx2x_fp(bp, i, rx_desc_mapping),
5747                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5748
5749                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5750                                bnx2x_fp(bp, i, rx_comp_mapping),
5751                                sizeof(struct eth_fast_path_rx_cqe) *
5752                                NUM_RCQ_BD);
5753
5754                 /* SGE ring */
5755                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5756                                bnx2x_fp(bp, i, rx_sge_mapping),
5757                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5758         }
5759         /* end of fastpath */
5760
5761         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5762                        sizeof(struct host_def_status_block));
5763
5764         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5765                        sizeof(struct bnx2x_slowpath));
5766
5767 #ifdef BCM_ISCSI
5768         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5769         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5770         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5771         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5772 #endif
5773         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5774
5775 #undef BNX2X_PCI_FREE
5776 #undef BNX2X_FREE
5777 }
5778
5779 static int bnx2x_alloc_mem(struct bnx2x *bp)
5780 {
5781
5782 #define BNX2X_PCI_ALLOC(x, y, size) \
5783         do { \
5784                 x = pci_alloc_consistent(bp->pdev, size, y); \
5785                 if (x == NULL) \
5786                         goto alloc_mem_err; \
5787                 memset(x, 0, size); \
5788         } while (0)
5789
5790 #define BNX2X_ALLOC(x, size) \
5791         do { \
5792                 x = vmalloc(size); \
5793                 if (x == NULL) \
5794                         goto alloc_mem_err; \
5795                 memset(x, 0, size); \
5796         } while (0)
5797
5798         int i;
5799
5800         /* fastpath */
5801         for_each_queue(bp, i) {
5802                 bnx2x_fp(bp, i, bp) = bp;
5803
5804                 /* Status blocks */
5805                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5806                                 &bnx2x_fp(bp, i, status_blk_mapping),
5807                                 sizeof(struct host_status_block) +
5808                                 sizeof(struct eth_tx_db_data));
5809
5810                 bnx2x_fp(bp, i, hw_tx_prods) =
5811                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5812
5813                 bnx2x_fp(bp, i, tx_prods_mapping) =
5814                                 bnx2x_fp(bp, i, status_blk_mapping) +
5815                                 sizeof(struct host_status_block);
5816
5817                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5818                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5819                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5820                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5821                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5822                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5823
5824                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5825                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5826                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5827                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5828                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5829
5830                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5831                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5832                                 sizeof(struct eth_fast_path_rx_cqe) *
5833                                 NUM_RCQ_BD);
5834
5835                 /* SGE ring */
5836                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5837                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5838                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5839                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5840                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5841         }
5842         /* end of fastpath */
5843
5844         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5845                         sizeof(struct host_def_status_block));
5846
5847         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5848                         sizeof(struct bnx2x_slowpath));
5849
5850 #ifdef BCM_ISCSI
5851         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5852
5853         /* Initialize T1 */
5854         for (i = 0; i < 64*1024; i += 64) {
5855                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5856                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5857         }
5858
5859         /* allocate the searcher T2 table:
5860            we allocate 1/4 of the T1 size (16K vs 64K) for T2,
5861            which is not entered into the ILT */
5862         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5863
5864         /* Initialize T2 */
5865         for (i = 0; i < 16*1024; i += 64)
5866                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5867
5868         /* now fixup the last line in the block to point to the next block */
5869         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5870
5871         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5872         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5873
5874         /* QM queues (128*MAX_CONN) */
5875         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5876 #endif
5877
5878         /* Slow path ring */
5879         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5880
5881         return 0;
5882
5883 alloc_mem_err:
5884         bnx2x_free_mem(bp);
5885         return -ENOMEM;
5886
5887 #undef BNX2X_PCI_ALLOC
5888 #undef BNX2X_ALLOC
5889 }
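/* Editor's note: BNX2X_PCI_ALLOC above hands out coherent DMA memory for
 * everything the chip itself reads or writes (status blocks, descriptor,
 * completion and SGE rings, slowpath buffers, SPQ), while BNX2X_ALLOC
 * vmalloc()s the host-only shadow rings (sw_tx_bd/sw_rx_bd/sw_rx_page
 * bookkeeping) that the hardware never touches.
 */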
5890
5891 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5892 {
5893         int i;
5894
5895         for_each_queue(bp, i) {
5896                 struct bnx2x_fastpath *fp = &bp->fp[i];
5897
5898                 u16 bd_cons = fp->tx_bd_cons;
5899                 u16 sw_prod = fp->tx_pkt_prod;
5900                 u16 sw_cons = fp->tx_pkt_cons;
5901
5902                 while (sw_cons != sw_prod) {
5903                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5904                         sw_cons++;
5905                 }
5906         }
5907 }
5908
5909 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5910 {
5911         int i, j;
5912
5913         for_each_queue(bp, j) {
5914                 struct bnx2x_fastpath *fp = &bp->fp[j];
5915
5916                 for (i = 0; i < NUM_RX_BD; i++) {
5917                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5918                         struct sk_buff *skb = rx_buf->skb;
5919
5920                         if (skb == NULL)
5921                                 continue;
5922
5923                         pci_unmap_single(bp->pdev,
5924                                          pci_unmap_addr(rx_buf, mapping),
5925                                          bp->rx_buf_use_size,
5926                                          PCI_DMA_FROMDEVICE);
5927
5928                         rx_buf->skb = NULL;
5929                         dev_kfree_skb(skb);
5930                 }
5931                 if (!fp->disable_tpa)
5932                         bnx2x_free_tpa_pool(bp, fp,
5933                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5934         }
5935 }
5936
5937 static void bnx2x_free_skbs(struct bnx2x *bp)
5938 {
5939         bnx2x_free_tx_skbs(bp);
5940         bnx2x_free_rx_skbs(bp);
5941 }
5942
5943 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5944 {
5945         int i, offset = 1;
5946
5947         free_irq(bp->msix_table[0].vector, bp->dev);
5948         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5949            bp->msix_table[0].vector);
5950
5951         for_each_queue(bp, i) {
5952                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5953                    "state %x\n", i, bp->msix_table[i + offset].vector,
5954                    bnx2x_fp(bp, i, state));
5955
5956                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5957                         BNX2X_ERR("IRQ of fp #%d being freed while "
5958                                   "state != closed\n", i);
5959
5960                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5961         }
5962 }
5963
5964 static void bnx2x_free_irq(struct bnx2x *bp)
5965 {
5966         if (bp->flags & USING_MSIX_FLAG) {
5967                 bnx2x_free_msix_irqs(bp);
5968                 pci_disable_msix(bp->pdev);
5969                 bp->flags &= ~USING_MSIX_FLAG;
5970
5971         } else
5972                 free_irq(bp->pdev->irq, bp->dev);
5973 }
5974
5975 static int bnx2x_enable_msix(struct bnx2x *bp)
5976 {
5977         int i, rc, offset;
5978
5979         bp->msix_table[0].entry = 0;
5980         offset = 1;
5981         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5982
5983         for_each_queue(bp, i) {
5984                 int igu_vec = offset + i + BP_L_ID(bp);
5985
5986                 bp->msix_table[i + offset].entry = igu_vec;
5987                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5988                    "(fastpath #%u)\n", i + offset, igu_vec, i);
5989         }
5990
5991         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5992                              bp->num_queues + offset);
5993         if (rc) {
5994                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
5995                 return -1;
5996         }
5997         bp->flags |= USING_MSIX_FLAG;
5998
5999         return 0;
6000 }
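/* Editor's sketch of the resulting vector layout, assuming BP_L_ID() == 0
 * and four queues:
 *   msix_table[0].entry = 0         slowpath / default status block
 *   msix_table[1..4].entry = 1..4   fastpath queues 0..3
 * so fastpath queue i is always serviced through msix_table[i + 1].
 */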
6001
6002 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6003 {
6004         int i, rc, offset = 1;
6005
6006         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6007                          bp->dev->name, bp->dev);
6008         if (rc) {
6009                 BNX2X_ERR("request sp irq failed\n");
6010                 return -EBUSY;
6011         }
6012
6013         for_each_queue(bp, i) {
6014                 rc = request_irq(bp->msix_table[i + offset].vector,
6015                                  bnx2x_msix_fp_int, 0,
6016                                  bp->dev->name, &bp->fp[i]);
6017                 if (rc) {
6018                         BNX2X_ERR("request fp #%d irq failed  rc %d\n",
6019                                   i + offset, rc);
6020                         bnx2x_free_msix_irqs(bp);
6021                         return -EBUSY;
6022                 }
6023
6024                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6025         }
6026
6027         return 0;
6028 }
6029
6030 static int bnx2x_req_irq(struct bnx2x *bp)
6031 {
6032         int rc;
6033
6034         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6035                          bp->dev->name, bp->dev);
6036         if (!rc)
6037                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6038
6039         return rc;
6040 }
6041
6042 /*
6043  * Init service functions
6044  */
6045
6046 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6047 {
6048         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6049         int port = BP_PORT(bp);
6050
6051         /* CAM allocation
6052          * unicasts 0-31:port0 32-63:port1
6053          * multicast 64-127:port0 128-191:port1
6054          */
6055         config->hdr.length_6b = 2;
6056         config->hdr.offset = port ? 32 : 0;     /* per the CAM map above */
6057         config->hdr.client_id = BP_CL_ID(bp);
6058         config->hdr.reserved1 = 0;
6059
6060         /* primary MAC */
6061         config->config_table[0].cam_entry.msb_mac_addr =
6062                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6063         config->config_table[0].cam_entry.middle_mac_addr =
6064                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6065         config->config_table[0].cam_entry.lsb_mac_addr =
6066                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6067         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6068         config->config_table[0].target_table_entry.flags = 0;
6069         config->config_table[0].target_table_entry.client_id = 0;
6070         config->config_table[0].target_table_entry.vlan_id = 0;
6071
6072         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6073            config->config_table[0].cam_entry.msb_mac_addr,
6074            config->config_table[0].cam_entry.middle_mac_addr,
6075            config->config_table[0].cam_entry.lsb_mac_addr);
6076
6077         /* broadcast */
6078         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6079         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6080         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6081         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6082         config->config_table[1].target_table_entry.flags =
6083                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6084         config->config_table[1].target_table_entry.client_id = 0;
6085         config->config_table[1].target_table_entry.vlan_id = 0;
6086
6087         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6088                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6089                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6090 }
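/* Editor's sketch: how the swab16() packing above lays a MAC address out,
 * assuming a little-endian host and dev_addr = 00:11:22:33:44:55:
 *   *(u16 *)&dev_addr[0] = 0x1100, swab16() -> msb_mac_addr    = 0x0011
 *   *(u16 *)&dev_addr[2] = 0x3322, swab16() -> middle_mac_addr = 0x2233
 *   *(u16 *)&dev_addr[4] = 0x5544, swab16() -> lsb_mac_addr    = 0x4455
 * which is exactly the byte order the DP() printout above displays.
 */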
6091
6092 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6093 {
6094         struct mac_configuration_cmd_e1h *config =
6095                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6096
6097         if (bp->state != BNX2X_STATE_OPEN) {
6098                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6099                 return;
6100         }
6101
6102         /* CAM allocation for E1H
6103          * unicasts: by func number
6104          * multicast: 20+FUNC*20, 20 each
6105          */
6106         config->hdr.length_6b = 1;
6107         config->hdr.offset = BP_FUNC(bp);
6108         config->hdr.client_id = BP_CL_ID(bp);
6109         config->hdr.reserved1 = 0;
6110
6111         /* primary MAC */
6112         config->config_table[0].msb_mac_addr =
6113                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6114         config->config_table[0].middle_mac_addr =
6115                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6116         config->config_table[0].lsb_mac_addr =
6117                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6118         config->config_table[0].client_id = BP_L_ID(bp);
6119         config->config_table[0].vlan_id = 0;
6120         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6121         config->config_table[0].flags = BP_PORT(bp);
6122
6123         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6124            config->config_table[0].msb_mac_addr,
6125            config->config_table[0].middle_mac_addr,
6126            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6127
6128         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6129                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6130                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6131 }
6132
6133 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6134                              int *state_p, int poll)
6135 {
6136         /* can take a while if any port is running */
6137         int cnt = 500;
6138
6139         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6140            poll ? "polling" : "waiting", state, idx);
6141
6142         might_sleep();
6143         while (cnt--) {
6144                 if (poll) {
6145                         bnx2x_rx_int(bp->fp, 10);
6146                         /* if index is different from 0
6147                          * the reply for some commands will
6148                          * be on the non-default queue
6149                          */
6150                         if (idx)
6151                                 bnx2x_rx_int(&bp->fp[idx], 10);
6152                 }
6153                 mb(); /* state is changed by bnx2x_sp_event() */
6154
6155                 if (*state_p == state)
6156                         return 0;
6157
6158                 msleep(1);
6159         }
6160
6161         /* timeout! */
6162         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6163                   poll ? "polling" : "waiting", state, idx);
6164 #ifdef BNX2X_STOP_ON_ERROR
6165         bnx2x_panic();
6166 #endif
6167
6168         return -EBUSY;
6169 }
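/* Editor's note: the 500-iteration loop above bounds the wait at about
 * 500 * msleep(1); since msleep(1) routinely oversleeps, the practical
 * bound is anywhere from ~0.5s to several seconds - hence the "can take
 * a while if any port is running" comment.
 */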
6170
6171 static int bnx2x_setup_leading(struct bnx2x *bp)
6172 {
6173         int rc;
6174
6175         /* reset IGU state */
6176         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6177
6178         /* SETUP ramrod */
6179         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6180
6181         /* Wait for completion */
6182         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6183
6184         return rc;
6185 }
6186
6187 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6188 {
6189         /* reset IGU state */
6190         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6191
6192         /* SETUP ramrod */
6193         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6194         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6195
6196         /* Wait for completion */
6197         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6198                                  &(bp->fp[index].state), 0);
6199 }
6200
6201 static int bnx2x_poll(struct napi_struct *napi, int budget);
6202 static void bnx2x_set_rx_mode(struct net_device *dev);
6203
6204 /* must be called with rtnl_lock */
6205 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6206 {
6207         u32 load_code;
6208         int i, rc;
6209
6210 #ifdef BNX2X_STOP_ON_ERROR
6211         if (unlikely(bp->panic))
6212                 return -EPERM;
6213 #endif
6214
6215         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6216
6217         /* Send LOAD_REQUEST command to the MCP.
6218            The MCP replies with the type of LOAD command:
6219            if this is the first port to be initialized,
6220            the common blocks should be initialized as well; otherwise not
6221         */
6222         if (!BP_NOMCP(bp)) {
6223                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6224                 if (!load_code) {
6225                         BNX2X_ERR("MCP response failure, aborting\n");
6226                         return -EBUSY;
6227                 }
6228                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6229                         return -EBUSY; /* other port in diagnostic mode */
6230
6231         } else {
6232                 int port = BP_PORT(bp);
6233
6234                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6235                    load_count[0], load_count[1], load_count[2]);
6236                 load_count[0]++;
6237                 load_count[1 + port]++;
6238                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6239                    load_count[0], load_count[1], load_count[2]);
6240                 if (load_count[0] == 1)
6241                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6242                 else if (load_count[1 + port] == 1)
6243                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6244                 else
6245                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6246         }
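        /* Editor's worked example of the bookkeeping above: the first
         * load anywhere (say port 0) yields counts {1,1,0} and selects
         * COMMON init; the first load on the other port yields {2,1,1}
         * and selects PORT init; any further load on an
         * already-initialized port selects FUNCTION init.
         */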
6247
6248         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6249             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6250                 bp->port.pmf = 1;
6251         else
6252                 bp->port.pmf = 0;
6253         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6254
6255         /* if we can't use MSI-X we only need one fp,
6256          * so try to enable MSI-X with the requested number of fp's
6257          * and fall back to INT#A with one fp
6258          */
6259         if (use_inta) {
6260                 bp->num_queues = 1;
6261
6262         } else {
6263                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6264                         /* user requested number */
6265                         bp->num_queues = use_multi;
6266
6267                 else if (use_multi)
6268                         bp->num_queues = min_t(u32, num_online_cpus(),
6269                                                BP_MAX_QUEUES(bp));
6270                 else
6271                         bp->num_queues = 1;
6272
6273                 if (bnx2x_enable_msix(bp)) {
6274                         /* failed to enable MSI-X */
6275                         bp->num_queues = 1;
6276                         if (use_multi)
6277                                 BNX2X_ERR("Multi requested but failed"
6278                                           " to enable MSI-X\n");
6279                 }
6280         }
6281         DP(NETIF_MSG_IFUP,
6282            "set number of queues to %d\n", bp->num_queues);
6283
6284         if (bnx2x_alloc_mem(bp))
6285                 return -ENOMEM;
6286
6287         for_each_queue(bp, i)
6288                 bnx2x_fp(bp, i, disable_tpa) =
6289                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6290
6291         if (bp->flags & USING_MSIX_FLAG) {
6292                 rc = bnx2x_req_msix_irqs(bp);
6293                 if (rc) {
6294                         pci_disable_msix(bp->pdev);
6295                         goto load_error;
6296                 }
6297         } else {
6298                 bnx2x_ack_int(bp);
6299                 rc = bnx2x_req_irq(bp);
6300                 if (rc) {
6301                         BNX2X_ERR("IRQ request failed, aborting\n");
6302                         goto load_error;
6303                 }
6304         }
6305
6306         for_each_queue(bp, i)
6307                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6308                                bnx2x_poll, 128);
6309
6310         /* Initialize HW */
6311         rc = bnx2x_init_hw(bp, load_code);
6312         if (rc) {
6313                 BNX2X_ERR("HW init failed, aborting\n");
6314                 goto load_error;
6315         }
6316
6317         /* Setup NIC internals and enable interrupts */
6318         bnx2x_nic_init(bp, load_code);
6319
6320         /* Send LOAD_DONE command to MCP */
6321         if (!BP_NOMCP(bp)) {
6322                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6323                 if (!load_code) {
6324                         BNX2X_ERR("MCP response failure, aborting\n");
6325                         rc = -EBUSY;
6326                         goto load_int_disable;
6327                 }
6328         }
6329
6330         bnx2x_stats_init(bp);
6331
6332         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6333
6334         /* Enable Rx interrupt handling before sending the ramrod
6335            as it's completed on Rx FP queue */
6336         for_each_queue(bp, i)
6337                 napi_enable(&bnx2x_fp(bp, i, napi));
6338
6339         /* Enable interrupt handling */
6340         atomic_set(&bp->intr_sem, 0);
6341
6342         rc = bnx2x_setup_leading(bp);
6343         if (rc) {
6344                 BNX2X_ERR("Setup leading failed!\n");
6345                 goto load_stop_netif;
6346         }
6347
6348         if (CHIP_IS_E1H(bp))
6349                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6350                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6351                         bp->state = BNX2X_STATE_DISABLED;
6352                 }
6353
6354         if (bp->state == BNX2X_STATE_OPEN)
6355                 for_each_nondefault_queue(bp, i) {
6356                         rc = bnx2x_setup_multi(bp, i);
6357                         if (rc)
6358                                 goto load_stop_netif;
6359                 }
6360
6361         if (CHIP_IS_E1(bp))
6362                 bnx2x_set_mac_addr_e1(bp);
6363         else
6364                 bnx2x_set_mac_addr_e1h(bp);
6365
6366         if (bp->port.pmf)
6367                 bnx2x_initial_phy_init(bp);
6368
6369         /* Start fast path */
6370         switch (load_mode) {
6371         case LOAD_NORMAL:
6372                 /* Tx queue should only be re-enabled */
6373                 netif_wake_queue(bp->dev);
6374                 bnx2x_set_rx_mode(bp->dev);
6375                 break;
6376
6377         case LOAD_OPEN:
6378                 netif_start_queue(bp->dev);
6379                 bnx2x_set_rx_mode(bp->dev);
6380                 if (bp->flags & USING_MSIX_FLAG)
6381                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6382                                bp->dev->name);
6383                 break;
6384
6385         case LOAD_DIAG:
6386                 bnx2x_set_rx_mode(bp->dev);
6387                 bp->state = BNX2X_STATE_DIAG;
6388                 break;
6389
6390         default:
6391                 break;
6392         }
6393
6394         if (!bp->port.pmf)
6395                 bnx2x__link_status_update(bp);
6396
6397         /* start the timer */
6398         mod_timer(&bp->timer, jiffies + bp->current_interval);
6399
6400
6401         return 0;
6402
6403 load_stop_netif:
6404         for_each_queue(bp, i)
6405                 napi_disable(&bnx2x_fp(bp, i, napi));
6406
6407 load_int_disable:
6408         bnx2x_int_disable_sync(bp);
6409
6410         /* Release IRQs */
6411         bnx2x_free_irq(bp);
6412
6413         /* Free SKBs, SGEs, TPA pool and driver internals */
6414         bnx2x_free_skbs(bp);
6415         for_each_queue(bp, i)
6416                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6417                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6418 load_error:
6419         bnx2x_free_mem(bp);
6420
6421         /* TBD we really need to reset the chip
6422            if we want to recover from this */
6423         return rc;
6424 }
6425
6426 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6427 {
6428         int rc;
6429
6430         /* halt the connection */
6431         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6432         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6433
6434         /* Wait for completion */
6435         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6436                                &(bp->fp[index].state), 1);
6437         if (rc) /* timeout */
6438                 return rc;
6439
6440         /* delete cfc entry */
6441         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6442
6443         /* Wait for completion */
6444         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6445                                &(bp->fp[index].state), 1);
6446         return rc;
6447 }
6448
6449 static int bnx2x_stop_leading(struct bnx2x *bp)
6450 {
6451         u16 dsb_sp_prod_idx;
6452         /* if the other port is handling traffic,
6453            this can take a lot of time */
6454         int cnt = 500;
6455         int rc;
6456
6457         might_sleep();
6458
6459         /* Send HALT ramrod */
6460         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6461         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6462
6463         /* Wait for completion */
6464         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6465                                &(bp->fp[0].state), 1);
6466         if (rc) /* timeout */
6467                 return rc;
6468
6469         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6470
6471         /* Send PORT_DELETE ramrod */
6472         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6473
6474         /* Wait for completion to arrive on default status block
6475            we are going to reset the chip anyway
6476            so there is not much to do if this times out
6477          */
6478         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6479                 msleep(1);
6480                 if (!cnt) {
6481                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6482                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6483                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6484 #ifdef BNX2X_STOP_ON_ERROR
6485                         bnx2x_panic();
6486 #else
6487                         rc = -EBUSY;
6488 #endif
6489                         break;
6490                 }
6491                 cnt--;
6492                 msleep(1);
6493         }
6494         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6495         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6496
6497         return rc;
6498 }
6499
6500 static void bnx2x_reset_func(struct bnx2x *bp)
6501 {
6502         int port = BP_PORT(bp);
6503         int func = BP_FUNC(bp);
6504         int base, i;
6505
6506         /* Configure IGU */
6507         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6508         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6509
6510         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6511
6512         /* Clear ILT */
6513         base = FUNC_ILT_BASE(func);
6514         for (i = base; i < base + ILT_PER_FUNC; i++)
6515                 bnx2x_ilt_wr(bp, i, 0);
6516 }
6517
6518 static void bnx2x_reset_port(struct bnx2x *bp)
6519 {
6520         int port = BP_PORT(bp);
6521         u32 val;
6522
6523         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6524
6525         /* Do not rcv packets to BRB */
6526         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6527         /* Do not direct rcv packets that are not for MCP to the BRB */
6528         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6529                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6530
6531         /* Configure AEU */
6532         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6533
6534         msleep(100);
6535         /* Check for BRB port occupancy */
6536         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6537         if (val)
6538                 DP(NETIF_MSG_IFDOWN,
6539                    "BRB1 is not empty  %d blocks are occupied\n", val);
6540
6541         /* TODO: Close Doorbell port? */
6542 }
6543
6544 static void bnx2x_reset_common(struct bnx2x *bp)
6545 {
6546         /* reset_common */
6547         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6548                0xd3ffff7f);
6549         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6550 }
6551
6552 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6553 {
6554         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6555            BP_FUNC(bp), reset_code);
6556
6557         switch (reset_code) {
6558         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6559                 bnx2x_reset_port(bp);
6560                 bnx2x_reset_func(bp);
6561                 bnx2x_reset_common(bp);
6562                 break;
6563
6564         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6565                 bnx2x_reset_port(bp);
6566                 bnx2x_reset_func(bp);
6567                 break;
6568
6569         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6570                 bnx2x_reset_func(bp);
6571                 break;
6572
6573         default:
6574                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6575                 break;
6576         }
6577 }
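/* Editor's note: the teardown scopes above nest - UNLOAD_COMMON resets
 * port, function and common blocks; UNLOAD_PORT resets port and function;
 * UNLOAD_FUNCTION resets only the per-function state.
 */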
6578
6579 /* must be called with rtnl_lock */
6580 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6581 {
6582         int port = BP_PORT(bp);
6583         u32 reset_code = 0;
6584         int i, cnt, rc;
6585
6586         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6587
6588         bp->rx_mode = BNX2X_RX_MODE_NONE;
6589         bnx2x_set_storm_rx_mode(bp);
6590
6591         if (netif_running(bp->dev)) {
6592                 netif_tx_disable(bp->dev);
6593                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6594         }
6595
6596         del_timer_sync(&bp->timer);
6597         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6598                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6599         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6600
6601         /* Wait until tx fast path tasks complete */
6602         for_each_queue(bp, i) {
6603                 struct bnx2x_fastpath *fp = &bp->fp[i];
6604
6605                 cnt = 1000;
6606                 smp_rmb();
6607                 while (BNX2X_HAS_TX_WORK(fp)) {
6608
6609                         if (!netif_running(bp->dev))
6610                                 bnx2x_tx_int(fp, 1000);
6611
6612                         if (!cnt) {
6613                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6614                                           i);
6615 #ifdef BNX2X_STOP_ON_ERROR
6616                                 bnx2x_panic();
6617                                 return -EBUSY;
6618 #else
6619                                 break;
6620 #endif
6621                         }
6622                         cnt--;
6623                         msleep(1);
6624                         smp_rmb();
6625                 }
6626         }
6627
6628         /* Give HW time to discard old tx messages */
6629         msleep(1);
6630
6631         for_each_queue(bp, i)
6632                 napi_disable(&bnx2x_fp(bp, i, napi));
6633         /* Disable interrupts after Tx and Rx are disabled on stack level */
6634         bnx2x_int_disable_sync(bp);
6635
6636         /* Release IRQs */
6637         bnx2x_free_irq(bp);
6638
6639         if (unload_mode == UNLOAD_NORMAL)
6640                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6641
6642         else if (bp->flags & NO_WOL_FLAG) {
6643                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6644                 if (CHIP_IS_E1H(bp))
6645                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6646
6647         } else if (bp->wol) {
6648                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6649                 u8 *mac_addr = bp->dev->dev_addr;
6650                 u32 val;
6651                 /* The mac address is written to entries 1-4 to
6652                    preserve entry 0 which is used by the PMF */
6653                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6654
6655                 val = (mac_addr[0] << 8) | mac_addr[1];
6656                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6657
6658                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6659                       (mac_addr[4] << 8) | mac_addr[5];
6660                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6661
6662                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6663
6664         } else
6665                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6666
6667         if (CHIP_IS_E1H(bp))
6668                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6669
6670         /* Close multi and leading connections
6671            Completions for ramrods are collected in a synchronous way */
6672         for_each_nondefault_queue(bp, i)
6673                 if (bnx2x_stop_multi(bp, i))
6674                         goto unload_error;
6675
6676         rc = bnx2x_stop_leading(bp);
6677         if (rc) {
6678                 BNX2X_ERR("Stop leading failed!\n");
6679 #ifdef BNX2X_STOP_ON_ERROR
6680                 return -EBUSY;
6681 #else
6682                 goto unload_error;
6683 #endif
6684         }
6685
6686 unload_error:
6687         if (!BP_NOMCP(bp))
6688                 reset_code = bnx2x_fw_command(bp, reset_code);
6689         else {
6690                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6691                    load_count[0], load_count[1], load_count[2]);
6692                 load_count[0]--;
6693                 load_count[1 + port]--;
6694                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6695                    load_count[0], load_count[1], load_count[2]);
6696                 if (load_count[0] == 0)
6697                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6698                 else if (load_count[1 + port] == 0)
6699                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6700                 else
6701                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6702         }
6703
6704         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6705             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6706                 bnx2x__link_reset(bp);
6707
6708         /* Reset the chip */
6709         bnx2x_reset_chip(bp, reset_code);
6710
6711         /* Report UNLOAD_DONE to MCP */
6712         if (!BP_NOMCP(bp))
6713                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6714
6715         /* Free SKBs, SGEs, TPA pool and driver internals */
6716         bnx2x_free_skbs(bp);
6717         for_each_queue(bp, i)
6718                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6719                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6720         bnx2x_free_mem(bp);
6721
6722         bp->state = BNX2X_STATE_CLOSED;
6723
6724         netif_carrier_off(bp->dev);
6725
6726         return 0;
6727 }
6728
6729 static void bnx2x_reset_task(struct work_struct *work)
6730 {
6731         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6732
6733 #ifdef BNX2X_STOP_ON_ERROR
6734         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6735                   " so reset is not done to allow a debug dump.\n"
6736          KERN_ERR " You will need to reboot when done\n");
6737         return;
6738 #endif
6739
6740         rtnl_lock();
6741
6742         if (!netif_running(bp->dev))
6743                 goto reset_task_exit;
6744
6745         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6746         bnx2x_nic_load(bp, LOAD_NORMAL);
6747
6748 reset_task_exit:
6749         rtnl_unlock();
6750 }
6751
6752 /* end of nic load/unload */
6753
6754 /* ethtool_ops */
6755
6756 /*
6757  * Init service functions
6758  */
6759
6760 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6761 {
6762         u32 val;
6763
6764         /* Check if there is any driver already loaded */
6765         val = REG_RD(bp, MISC_REG_UNPREPARED);
6766         if (val == 0x1) {
6767                 /* Check if it is the UNDI driver
6768                  * UNDI driver initializes CID offset for normal bell to 0x7
6769                  */
6770                 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6771                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6772                 if (val == 0x7) {
6773                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6774                         /* save our func */
6775                         int func = BP_FUNC(bp);
6776                         u32 swap_en;
6777                         u32 swap_val;
6778
6779                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6780
6781                         /* try unload UNDI on port 0 */
6782                         bp->func = 0;
6783                         bp->fw_seq =
6784                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6785                                 DRV_MSG_SEQ_NUMBER_MASK);
6786                         reset_code = bnx2x_fw_command(bp, reset_code);
6787
6788                         /* if UNDI is loaded on the other port */
6789                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6790
6791                                 /* send "DONE" for previous unload */
6792                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6793
6794                                 /* unload UNDI on port 1 */
6795                                 bp->func = 1;
6796                                 bp->fw_seq =
6797                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6798                                         DRV_MSG_SEQ_NUMBER_MASK);
6799                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6800
6801                                 bnx2x_fw_command(bp, reset_code);
6802                         }
6803
6804                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6805                                     HC_REG_CONFIG_0), 0x1000);
6806
6807                         /* close input traffic and wait for it to drain */
6808                         /* do not receive packets to the BRB */
6809                         REG_WR(bp,
6810                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6811                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6812                         /* do not direct receive packets that are not for
6813                          * the MCP to the BRB */
6814                         REG_WR(bp,
6815                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6816                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6817                         /* clear AEU */
6818                         REG_WR(bp,
6819                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6820                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6821                         msleep(10);
6822
6823                         /* save NIG port swap info */
6824                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6825                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6826                         /* reset device */
6827                         REG_WR(bp,
6828                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6829                                0xd3ffffff);
6830                         REG_WR(bp,
6831                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6832                                0x1403);
6833                         /* take the NIG out of reset and restore swap values */
6834                         REG_WR(bp,
6835                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6836                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6837                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6838                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6839
6840                         /* send unload done to the MCP */
6841                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6842
6843                         /* restore our func and fw_seq */
6844                         bp->func = func;
6845                         bp->fw_seq =
6846                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6847                                 DRV_MSG_SEQ_NUMBER_MASK);
6848                 }
6849                 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_UNDI);
6850         }
6851 }
6852
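/* Read the port-independent chip configuration: chip id, flash size and
 * the shared memory (shmem) base used to talk to the MCP.  An out-of-range
 * shmem base means the MCP is not active, so NO_MCP_FLAG is set.
 */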
6853 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6854 {
6855         u32 val, val2, val3, val4, id;
6856
6857         /* Get the chip revision id and number. */
6858         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6859         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6860         id = ((val & 0xffff) << 16);
6861         val = REG_RD(bp, MISC_REG_CHIP_REV);
6862         id |= ((val & 0xf) << 12);
6863         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6864         id |= ((val & 0xff) << 4);
6865         val = REG_RD(bp, MISC_REG_BOND_ID);
6866         id |= (val & 0xf);
6867         bp->common.chip_id = id;
6868         bp->link_params.chip_id = bp->common.chip_id;
6869         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6870
6871         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6872         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6873                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6874         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6875                        bp->common.flash_size, bp->common.flash_size);
6876
6877         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6878         bp->link_params.shmem_base = bp->common.shmem_base;
6879         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6880
6881         if (!bp->common.shmem_base ||
6882             (bp->common.shmem_base < 0xA0000) ||
6883             (bp->common.shmem_base >= 0xC0000)) {
6884                 BNX2X_DEV_INFO("MCP not active\n");
6885                 bp->flags |= NO_MCP_FLAG;
6886                 return;
6887         }
6888
6889         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6890         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6891                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6892                 BNX2X_ERR("BAD MCP validity signature\n");
6893
6894         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6895         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6896
6897         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6898                        bp->common.hw_config, bp->common.board);
6899
6900         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6901                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6902                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6903
6904         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6905         bp->common.bc_ver = val;
6906         BNX2X_DEV_INFO("bc_ver %X\n", val);
6907         if (val < BNX2X_BC_VER) {
6908                 /* for now only warn
6909                  * later we might need to enforce this */
6910                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6911                           " please upgrade BC\n", BNX2X_BC_VER, val);
6912         }
6913         BNX2X_DEV_INFO("%sWoL Capable\n",
6914                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6915
6916         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6917         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6918         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6919         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6920
6921         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6922                val, val2, val3, val4);
6923 }
6924
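/* Derive the ethtool "supported" mask for this port from the NVRAM switch
 * configuration (1G SerDes vs 10G XGXS) and the external PHY type, then
 * trim it according to the speed capability mask.
 */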
6925 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6926                                                     u32 switch_cfg)
6927 {
6928         int port = BP_PORT(bp);
6929         u32 ext_phy_type;
6930
6931         switch (switch_cfg) {
6932         case SWITCH_CFG_1G:
6933                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6934
6935                 ext_phy_type =
6936                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6937                 switch (ext_phy_type) {
6938                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6939                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6940                                        ext_phy_type);
6941
6942                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6943                                                SUPPORTED_10baseT_Full |
6944                                                SUPPORTED_100baseT_Half |
6945                                                SUPPORTED_100baseT_Full |
6946                                                SUPPORTED_1000baseT_Full |
6947                                                SUPPORTED_2500baseX_Full |
6948                                                SUPPORTED_TP |
6949                                                SUPPORTED_FIBRE |
6950                                                SUPPORTED_Autoneg |
6951                                                SUPPORTED_Pause |
6952                                                SUPPORTED_Asym_Pause);
6953                         break;
6954
6955                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6956                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6957                                        ext_phy_type);
6958
6959                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6960                                                SUPPORTED_10baseT_Full |
6961                                                SUPPORTED_100baseT_Half |
6962                                                SUPPORTED_100baseT_Full |
6963                                                SUPPORTED_1000baseT_Full |
6964                                                SUPPORTED_TP |
6965                                                SUPPORTED_FIBRE |
6966                                                SUPPORTED_Autoneg |
6967                                                SUPPORTED_Pause |
6968                                                SUPPORTED_Asym_Pause);
6969                         break;
6970
6971                 default:
6972                         BNX2X_ERR("NVRAM config error. "
6973                                   "BAD SerDes ext_phy_config 0x%x\n",
6974                                   bp->link_params.ext_phy_config);
6975                         return;
6976                 }
6977
6978                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6979                                            port*0x10);
6980                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6981                 break;
6982
6983         case SWITCH_CFG_10G:
6984                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6985
6986                 ext_phy_type =
6987                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6988                 switch (ext_phy_type) {
6989                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6990                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6991                                        ext_phy_type);
6992
6993                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6994                                                SUPPORTED_10baseT_Full |
6995                                                SUPPORTED_100baseT_Half |
6996                                                SUPPORTED_100baseT_Full |
6997                                                SUPPORTED_1000baseT_Full |
6998                                                SUPPORTED_2500baseX_Full |
6999                                                SUPPORTED_10000baseT_Full |
7000                                                SUPPORTED_TP |
7001                                                SUPPORTED_FIBRE |
7002                                                SUPPORTED_Autoneg |
7003                                                SUPPORTED_Pause |
7004                                                SUPPORTED_Asym_Pause);
7005                         break;
7006
7007                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7008                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7009                                        ext_phy_type);
7010
7011                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7012                                                SUPPORTED_FIBRE |
7013                                                SUPPORTED_Pause |
7014                                                SUPPORTED_Asym_Pause);
7015                         break;
7016
7017                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7018                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7019                                        ext_phy_type);
7020
7021                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7022                                                SUPPORTED_1000baseT_Full |
7023                                                SUPPORTED_FIBRE |
7024                                                SUPPORTED_Pause |
7025                                                SUPPORTED_Asym_Pause);
7026                         break;
7027
7028                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7029                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7030                                        ext_phy_type);
7031
7032                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7033                                                SUPPORTED_1000baseT_Full |
7034                                                SUPPORTED_FIBRE |
7035                                                SUPPORTED_Autoneg |
7036                                                SUPPORTED_Pause |
7037                                                SUPPORTED_Asym_Pause);
7038                         break;
7039
7040                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7041                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7042                                        ext_phy_type);
7043
7044                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7045                                                SUPPORTED_2500baseX_Full |
7046                                                SUPPORTED_1000baseT_Full |
7047                                                SUPPORTED_FIBRE |
7048                                                SUPPORTED_Autoneg |
7049                                                SUPPORTED_Pause |
7050                                                SUPPORTED_Asym_Pause);
7051                         break;
7052
7053                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7054                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7055                                        ext_phy_type);
7056
7057                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7058                                                SUPPORTED_TP |
7059                                                SUPPORTED_Autoneg |
7060                                                SUPPORTED_Pause |
7061                                                SUPPORTED_Asym_Pause);
7062                         break;
7063
7064                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7065                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7066                                   bp->link_params.ext_phy_config);
7067                         break;
7068
7069                 default:
7070                         BNX2X_ERR("NVRAM config error. "
7071                                   "BAD XGXS ext_phy_config 0x%x\n",
7072                                   bp->link_params.ext_phy_config);
7073                         return;
7074                 }
7075
7076                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7077                                            port*0x18);
7078                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7079
7080                 break;
7081
7082         default:
7083                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7084                           bp->port.link_config);
7085                 return;
7086         }
7087         bp->link_params.phy_addr = bp->port.phy_addr;
7088
7089         /* mask what we support according to speed_cap_mask */
7090         if (!(bp->link_params.speed_cap_mask &
7091                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7092                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7093
7094         if (!(bp->link_params.speed_cap_mask &
7095                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7096                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7097
7098         if (!(bp->link_params.speed_cap_mask &
7099                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7100                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7101
7102         if (!(bp->link_params.speed_cap_mask &
7103                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7104                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7105
7106         if (!(bp->link_params.speed_cap_mask &
7107                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7108                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7109                                         SUPPORTED_1000baseT_Full);
7110
7111         if (!(bp->link_params.speed_cap_mask &
7112                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7113                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7114
7115         if (!(bp->link_params.speed_cap_mask &
7116                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7117                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7118
7119         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7120 }
7121
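/* Translate the NVRAM link_config into the requested line speed, duplex,
 * advertised mask and flow control.  An invalid combination falls back to
 * autoneg with everything supported advertised.
 */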
7122 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7123 {
7124         bp->link_params.req_duplex = DUPLEX_FULL;
7125
7126         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7127         case PORT_FEATURE_LINK_SPEED_AUTO:
7128                 if (bp->port.supported & SUPPORTED_Autoneg) {
7129                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7130                         bp->port.advertising = bp->port.supported;
7131                 } else {
7132                         u32 ext_phy_type =
7133                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7134
7135                         if ((ext_phy_type ==
7136                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7137                             (ext_phy_type ==
7138                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7139                                 /* force 10G, no AN */
7140                                 bp->link_params.req_line_speed = SPEED_10000;
7141                                 bp->port.advertising =
7142                                                 (ADVERTISED_10000baseT_Full |
7143                                                  ADVERTISED_FIBRE);
7144                                 break;
7145                         }
7146                         BNX2X_ERR("NVRAM config error. "
7147                                   "Invalid link_config 0x%x"
7148                                   "  Autoneg not supported\n",
7149                                   bp->port.link_config);
7150                         return;
7151                 }
7152                 break;
7153
7154         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7155                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7156                         bp->link_params.req_line_speed = SPEED_10;
7157                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7158                                                 ADVERTISED_TP);
7159                 } else {
7160                         BNX2X_ERR("NVRAM config error. "
7161                                   "Invalid link_config 0x%x"
7162                                   "  speed_cap_mask 0x%x\n",
7163                                   bp->port.link_config,
7164                                   bp->link_params.speed_cap_mask);
7165                         return;
7166                 }
7167                 break;
7168
7169         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7170                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7171                         bp->link_params.req_line_speed = SPEED_10;
7172                         bp->link_params.req_duplex = DUPLEX_HALF;
7173                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7174                                                 ADVERTISED_TP);
7175                 } else {
7176                         BNX2X_ERR("NVRAM config error. "
7177                                   "Invalid link_config 0x%x"
7178                                   "  speed_cap_mask 0x%x\n",
7179                                   bp->port.link_config,
7180                                   bp->link_params.speed_cap_mask);
7181                         return;
7182                 }
7183                 break;
7184
7185         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7186                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7187                         bp->link_params.req_line_speed = SPEED_100;
7188                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7189                                                 ADVERTISED_TP);
7190                 } else {
7191                         BNX2X_ERR("NVRAM config error. "
7192                                   "Invalid link_config 0x%x"
7193                                   "  speed_cap_mask 0x%x\n",
7194                                   bp->port.link_config,
7195                                   bp->link_params.speed_cap_mask);
7196                         return;
7197                 }
7198                 break;
7199
7200         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7201                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7202                         bp->link_params.req_line_speed = SPEED_100;
7203                         bp->link_params.req_duplex = DUPLEX_HALF;
7204                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7205                                                 ADVERTISED_TP);
7206                 } else {
7207                         BNX2X_ERR("NVRAM config error. "
7208                                   "Invalid link_config 0x%x"
7209                                   "  speed_cap_mask 0x%x\n",
7210                                   bp->port.link_config,
7211                                   bp->link_params.speed_cap_mask);
7212                         return;
7213                 }
7214                 break;
7215
7216         case PORT_FEATURE_LINK_SPEED_1G:
7217                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7218                         bp->link_params.req_line_speed = SPEED_1000;
7219                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7220                                                 ADVERTISED_TP);
7221                 } else {
7222                         BNX2X_ERR("NVRAM config error. "
7223                                   "Invalid link_config 0x%x"
7224                                   "  speed_cap_mask 0x%x\n",
7225                                   bp->port.link_config,
7226                                   bp->link_params.speed_cap_mask);
7227                         return;
7228                 }
7229                 break;
7230
7231         case PORT_FEATURE_LINK_SPEED_2_5G:
7232                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7233                         bp->link_params.req_line_speed = SPEED_2500;
7234                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7235                                                 ADVERTISED_TP);
7236                 } else {
7237                         BNX2X_ERR("NVRAM config error. "
7238                                   "Invalid link_config 0x%x"
7239                                   "  speed_cap_mask 0x%x\n",
7240                                   bp->port.link_config,
7241                                   bp->link_params.speed_cap_mask);
7242                         return;
7243                 }
7244                 break;
7245
7246         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7247         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7248         case PORT_FEATURE_LINK_SPEED_10G_KR:
7249                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7250                         bp->link_params.req_line_speed = SPEED_10000;
7251                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7252                                                 ADVERTISED_FIBRE);
7253                 } else {
7254                         BNX2X_ERR("NVRAM config error. "
7255                                   "Invalid link_config 0x%x"
7256                                   "  speed_cap_mask 0x%x\n",
7257                                   bp->port.link_config,
7258                                   bp->link_params.speed_cap_mask);
7259                         return;
7260                 }
7261                 break;
7262
7263         default:
7264                 BNX2X_ERR("NVRAM config error. "
7265                           "BAD link speed link_config 0x%x\n",
7266                           bp->port.link_config);
7267                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7268                 bp->port.advertising = bp->port.supported;
7269                 break;
7270         }
7271
7272         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7273                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7274         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7275             !(bp->port.supported & SUPPORTED_Autoneg))
7276                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7277
7278         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7279                        "  advertising 0x%x\n",
7280                        bp->link_params.req_line_speed,
7281                        bp->link_params.req_duplex,
7282                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7283 }
7284
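/* Read the per-port configuration from shmem: SerDes/lane/external PHY
 * setup, speed capability mask, link config and the port MAC address.
 */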
7285 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7286 {
7287         int port = BP_PORT(bp);
7288         u32 val, val2;
7289
7290         bp->link_params.bp = bp;
7291         bp->link_params.port = port;
7292
7293         bp->link_params.serdes_config =
7294                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7295         bp->link_params.lane_config =
7296                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7297         bp->link_params.ext_phy_config =
7298                 SHMEM_RD(bp,
7299                          dev_info.port_hw_config[port].external_phy_config);
7300         bp->link_params.speed_cap_mask =
7301                 SHMEM_RD(bp,
7302                          dev_info.port_hw_config[port].speed_capability_mask);
7303
7304         bp->port.link_config =
7305                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7306
7307         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7308              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7309                        "  link_config 0x%08x\n",
7310                        bp->link_params.serdes_config,
7311                        bp->link_params.lane_config,
7312                        bp->link_params.ext_phy_config,
7313                        bp->link_params.speed_cap_mask, bp->port.link_config);
7314
7315         bp->link_params.switch_cfg = (bp->port.link_config &
7316                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7317         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7318
7319         bnx2x_link_settings_requested(bp);
7320
7321         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7322         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7323         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7324         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7325         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7326         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7327         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7328         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7329         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7330         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7331 }
7332
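/* Gather all HW info: common chip info, E1H multi-function (MF) mode
 * detection via the outer-VLAN (E1HOV) tag, per-port info when the MCP is
 * present, and the MAC address (per-function in MF mode, random on
 * MCP-less emulation/FPGA).
 */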
7333 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7334 {
7335         int func = BP_FUNC(bp);
7336         u32 val, val2;
7337         int rc = 0;
7338
7339         bnx2x_get_common_hwinfo(bp);
7340
7341         bp->e1hov = 0;
7342         bp->e1hmf = 0;
7343         if (CHIP_IS_E1H(bp)) {
7344                 bp->mf_config =
7345                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7346
7347                 val =
7348                    (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7349                     FUNC_MF_CFG_E1HOV_TAG_MASK);
7350                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7351
7352                         bp->e1hov = val;
7353                         bp->e1hmf = 1;
7354                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7355                                        "(0x%04x)\n",
7356                                        func, bp->e1hov, bp->e1hov);
7357                 } else {
7358                         BNX2X_DEV_INFO("Single function mode\n");
7359                         if (BP_E1HVN(bp)) {
7360                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7361                                           "  aborting\n", func);
7362                                 rc = -EPERM;
7363                         }
7364                 }
7365         }
7366
7367         if (!BP_NOMCP(bp)) {
7368                 bnx2x_get_port_hwinfo(bp);
7369
7370                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7371                               DRV_MSG_SEQ_NUMBER_MASK);
7372                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7373         }
7374
7375         if (IS_E1HMF(bp)) {
7376                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7377                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7378                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7379                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7380                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7381                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7382                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7383                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7384                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7385                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7386                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7387                                ETH_ALEN);
7388                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7389                                ETH_ALEN);
7390                 }
7391
7392                 return rc;
7393         }
7394
7395         if (BP_NOMCP(bp)) {
7396                 /* only supposed to happen on emulation/FPGA */
7397                 BNX2X_ERR("warning: random MAC workaround active\n");
7398                 random_ether_addr(bp->dev->dev_addr);
7399                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7400         }
7401
7402         return rc;
7403 }
7404
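/* One-time SW init of the driver instance: interrupts stay masked via
 * intr_sem until the HW is initialized; set up locks, work items, HW info,
 * UNDI cleanup, TPA defaults, ring sizes, coalescing and the periodic
 * timer.
 */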
7405 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7406 {
7407         int func = BP_FUNC(bp);
7408         int rc;
7409
7410         /* Disable interrupt handling until HW is initialized */
7411         atomic_set(&bp->intr_sem, 1);
7412
7413         mutex_init(&bp->port.phy_mutex);
7414
7415         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7416         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7417
7418         rc = bnx2x_get_hwinfo(bp);
7419
7420         /* need to reset chip if undi was active */
7421         if (!BP_NOMCP(bp))
7422                 bnx2x_undi_unload(bp);
7423
7424         if (CHIP_REV_IS_FPGA(bp))
7425                 printk(KERN_ERR PFX "FPGA detected\n");
7426
7427         if (BP_NOMCP(bp) && (func == 0))
7428                 printk(KERN_ERR PFX
7429                        "MCP disabled, must load devices in order!\n");
7430
7431         /* Set TPA flags */
7432         if (disable_tpa) {
7433                 bp->flags &= ~TPA_ENABLE_FLAG;
7434                 bp->dev->features &= ~NETIF_F_LRO;
7435         } else {
7436                 bp->flags |= TPA_ENABLE_FLAG;
7437                 bp->dev->features |= NETIF_F_LRO;
7438         }
7439
7441         bp->tx_ring_size = MAX_TX_AVAIL;
7442         bp->rx_ring_size = MAX_RX_AVAIL;
7443
7444         bp->rx_csum = 1;
7445         bp->rx_offset = 0;
7446
7447         bp->tx_ticks = 50;
7448         bp->rx_ticks = 25;
7449
7450         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7451         bp->current_interval = (poll ? poll : bp->timer_interval);
7452
7453         init_timer(&bp->timer);
7454         bp->timer.expires = jiffies + bp->current_interval;
7455         bp->timer.data = (unsigned long) bp;
7456         bp->timer.function = bnx2x_timer;
7457
7458         return rc;
7459 }
7460
7461 /*
7462  * ethtool service functions
7463  */
7464
7465 /* All ethtool functions called with rtnl_lock */
7466
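/* Report the current settings, or the requested ones while the link is
 * down; in MF mode the reported speed is clipped to this function's
 * maximum bandwidth share.
 */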
7467 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7468 {
7469         struct bnx2x *bp = netdev_priv(dev);
7470
7471         cmd->supported = bp->port.supported;
7472         cmd->advertising = bp->port.advertising;
7473
7474         if (netif_carrier_ok(dev)) {
7475                 cmd->speed = bp->link_vars.line_speed;
7476                 cmd->duplex = bp->link_vars.duplex;
7477         } else {
7478                 cmd->speed = bp->link_params.req_line_speed;
7479                 cmd->duplex = bp->link_params.req_duplex;
7480         }
7481         if (IS_E1HMF(bp)) {
7482                 u16 vn_max_rate;
7483
7484                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7485                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7486                 if (vn_max_rate < cmd->speed)
7487                         cmd->speed = vn_max_rate;
7488         }
7489
7490         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7491                 u32 ext_phy_type =
7492                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7493
7494                 switch (ext_phy_type) {
7495                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7496                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7497                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7498                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7499                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7500                         cmd->port = PORT_FIBRE;
7501                         break;
7502
7503                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7504                         cmd->port = PORT_TP;
7505                         break;
7506
7507                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7508                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7509                                   bp->link_params.ext_phy_config);
7510                         break;
7511
7512                 default:
7513                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7514                            bp->link_params.ext_phy_config);
7515                         break;
7516                 }
7517         } else
7518                 cmd->port = PORT_TP;
7519
7520         cmd->phy_address = bp->port.phy_addr;
7521         cmd->transceiver = XCVR_INTERNAL;
7522
7523         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7524                 cmd->autoneg = AUTONEG_ENABLE;
7525         else
7526                 cmd->autoneg = AUTONEG_DISABLE;
7527
7528         cmd->maxtxpkt = 0;
7529         cmd->maxrxpkt = 0;
7530
7531         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7532            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7533            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7534            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7535            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7536            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7537            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7538
7539         return 0;
7540 }
7541
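/* Apply new link settings.  In MF mode the link is managed at the port
 * level, so per-function requests are accepted but ignored.
 */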
7542 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7543 {
7544         struct bnx2x *bp = netdev_priv(dev);
7545         u32 advertising;
7546
7547         if (IS_E1HMF(bp))
7548                 return 0;
7549
7550         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7551            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7552            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7553            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7554            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7555            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7556            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7557
7558         if (cmd->autoneg == AUTONEG_ENABLE) {
7559                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7560                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7561                         return -EINVAL;
7562                 }
7563
7564                 /* advertise the requested speed and duplex if supported */
7565                 cmd->advertising &= bp->port.supported;
7566
7567                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7568                 bp->link_params.req_duplex = DUPLEX_FULL;
7569                 bp->port.advertising |= (ADVERTISED_Autoneg |
7570                                          cmd->advertising);
7571
7572         } else { /* forced speed */
7573                 /* advertise the requested speed and duplex if supported */
7574                 switch (cmd->speed) {
7575                 case SPEED_10:
7576                         if (cmd->duplex == DUPLEX_FULL) {
7577                                 if (!(bp->port.supported &
7578                                       SUPPORTED_10baseT_Full)) {
7579                                         DP(NETIF_MSG_LINK,
7580                                            "10M full not supported\n");
7581                                         return -EINVAL;
7582                                 }
7583
7584                                 advertising = (ADVERTISED_10baseT_Full |
7585                                                ADVERTISED_TP);
7586                         } else {
7587                                 if (!(bp->port.supported &
7588                                       SUPPORTED_10baseT_Half)) {
7589                                         DP(NETIF_MSG_LINK,
7590                                            "10M half not supported\n");
7591                                         return -EINVAL;
7592                                 }
7593
7594                                 advertising = (ADVERTISED_10baseT_Half |
7595                                                ADVERTISED_TP);
7596                         }
7597                         break;
7598
7599                 case SPEED_100:
7600                         if (cmd->duplex == DUPLEX_FULL) {
7601                                 if (!(bp->port.supported &
7602                                                 SUPPORTED_100baseT_Full)) {
7603                                         DP(NETIF_MSG_LINK,
7604                                            "100M full not supported\n");
7605                                         return -EINVAL;
7606                                 }
7607
7608                                 advertising = (ADVERTISED_100baseT_Full |
7609                                                ADVERTISED_TP);
7610                         } else {
7611                                 if (!(bp->port.supported &
7612                                                 SUPPORTED_100baseT_Half)) {
7613                                         DP(NETIF_MSG_LINK,
7614                                            "100M half not supported\n");
7615                                         return -EINVAL;
7616                                 }
7617
7618                                 advertising = (ADVERTISED_100baseT_Half |
7619                                                ADVERTISED_TP);
7620                         }
7621                         break;
7622
7623                 case SPEED_1000:
7624                         if (cmd->duplex != DUPLEX_FULL) {
7625                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7626                                 return -EINVAL;
7627                         }
7628
7629                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7630                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7631                                 return -EINVAL;
7632                         }
7633
7634                         advertising = (ADVERTISED_1000baseT_Full |
7635                                        ADVERTISED_TP);
7636                         break;
7637
7638                 case SPEED_2500:
7639                         if (cmd->duplex != DUPLEX_FULL) {
7640                                 DP(NETIF_MSG_LINK,
7641                                    "2.5G half not supported\n");
7642                                 return -EINVAL;
7643                         }
7644
7645                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7646                                 DP(NETIF_MSG_LINK,
7647                                    "2.5G full not supported\n");
7648                                 return -EINVAL;
7649                         }
7650
7651                         advertising = (ADVERTISED_2500baseX_Full |
7652                                        ADVERTISED_TP);
7653                         break;
7654
7655                 case SPEED_10000:
7656                         if (cmd->duplex != DUPLEX_FULL) {
7657                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7658                                 return -EINVAL;
7659                         }
7660
7661                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7662                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7663                                 return -EINVAL;
7664                         }
7665
7666                         advertising = (ADVERTISED_10000baseT_Full |
7667                                        ADVERTISED_FIBRE);
7668                         break;
7669
7670                 default:
7671                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7672                         return -EINVAL;
7673                 }
7674
7675                 bp->link_params.req_line_speed = cmd->speed;
7676                 bp->link_params.req_duplex = cmd->duplex;
7677                 bp->port.advertising = advertising;
7678         }
7679
7680         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7681            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7682            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7683            bp->port.advertising);
7684
7685         if (netif_running(dev)) {
7686                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7687                 bnx2x_link_set(bp);
7688         }
7689
7690         return 0;
7691 }
7692
7693 #define PHY_FW_VER_LEN                  10
7694
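/* The external PHY FW version can only be read by the port management
 * function (PMF) and requires the PHY HW lock.
 */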
7695 static void bnx2x_get_drvinfo(struct net_device *dev,
7696                               struct ethtool_drvinfo *info)
7697 {
7698         struct bnx2x *bp = netdev_priv(dev);
7699         char phy_fw_ver[PHY_FW_VER_LEN];
7700
7701         strcpy(info->driver, DRV_MODULE_NAME);
7702         strcpy(info->version, DRV_MODULE_VERSION);
7703
7704         phy_fw_ver[0] = '\0';
7705         if (bp->port.pmf) {
7706                 bnx2x_phy_hw_lock(bp);
7707                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7708                                              (bp->state != BNX2X_STATE_CLOSED),
7709                                              phy_fw_ver, PHY_FW_VER_LEN);
7710                 bnx2x_phy_hw_unlock(bp);
7711         }
7712
7713         snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7714                  BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7715                  BCM_5710_FW_REVISION_VERSION,
7716                  BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7717                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7718         strcpy(info->bus_info, pci_name(bp->pdev));
7719         info->n_stats = BNX2X_NUM_STATS;
7720         info->testinfo_len = BNX2X_NUM_TESTS;
7721         info->eedump_len = bp->common.flash_size;
7722         info->regdump_len = 0;
7723 }
7724
7725 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7726 {
7727         struct bnx2x *bp = netdev_priv(dev);
7728
7729         if (bp->flags & NO_WOL_FLAG) {
7730                 wol->supported = 0;
7731                 wol->wolopts = 0;
7732         } else {
7733                 wol->supported = WAKE_MAGIC;
7734                 if (bp->wol)
7735                         wol->wolopts = WAKE_MAGIC;
7736                 else
7737                         wol->wolopts = 0;
7738         }
7739         memset(&wol->sopass, 0, sizeof(wol->sopass));
7740 }
7741
7742 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7743 {
7744         struct bnx2x *bp = netdev_priv(dev);
7745
7746         if (wol->wolopts & ~WAKE_MAGIC)
7747                 return -EINVAL;
7748
7749         if (wol->wolopts & WAKE_MAGIC) {
7750                 if (bp->flags & NO_WOL_FLAG)
7751                         return -EINVAL;
7752
7753                 bp->wol = 1;
7754         } else
7755                 bp->wol = 0;
7756
7757         return 0;
7758 }
7759
7760 static u32 bnx2x_get_msglevel(struct net_device *dev)
7761 {
7762         struct bnx2x *bp = netdev_priv(dev);
7763
7764         return bp->msglevel;
7765 }
7766
7767 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7768 {
7769         struct bnx2x *bp = netdev_priv(dev);
7770
7771         if (capable(CAP_NET_ADMIN))
7772                 bp->msglevel = level;
7773 }
7774
7775 static int bnx2x_nway_reset(struct net_device *dev)
7776 {
7777         struct bnx2x *bp = netdev_priv(dev);
7778
7779         if (!bp->port.pmf)
7780                 return 0;
7781
7782         if (netif_running(dev)) {
7783                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7784                 bnx2x_link_set(bp);
7785         }
7786
7787         return 0;
7788 }
7789
7790 static int bnx2x_get_eeprom_len(struct net_device *dev)
7791 {
7792         struct bnx2x *bp = netdev_priv(dev);
7793
7794         return bp->common.flash_size;
7795 }
7796
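/* NVRAM access is arbitrated between the two ports through per-port
 * request/grant bits in MCPR_NVM_SW_ARB: set our request bit and poll
 * until the matching grant bit appears.
 */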
7797 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7798 {
7799         int port = BP_PORT(bp);
7800         int count, i;
7801         u32 val = 0;
7802
7803         /* adjust timeout for emulation/FPGA */
7804         count = NVRAM_TIMEOUT_COUNT;
7805         if (CHIP_REV_IS_SLOW(bp))
7806                 count *= 100;
7807
7808         /* request access to nvram interface */
7809         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7810                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7811
7812         for (i = 0; i < count*10; i++) {
7813                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7814                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7815                         break;
7816
7817                 udelay(5);
7818         }
7819
7820         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7821                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7822                 return -EBUSY;
7823         }
7824
7825         return 0;
7826 }
7827
7828 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7829 {
7830         int port = BP_PORT(bp);
7831         int count, i;
7832         u32 val = 0;
7833
7834         /* adjust timeout for emulation/FPGA */
7835         count = NVRAM_TIMEOUT_COUNT;
7836         if (CHIP_REV_IS_SLOW(bp))
7837                 count *= 100;
7838
7839         /* relinquish nvram interface */
7840         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7841                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7842
7843         for (i = 0; i < count*10; i++) {
7844                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7845                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7846                         break;
7847
7848                 udelay(5);
7849         }
7850
7851         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7852                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7853                 return -EBUSY;
7854         }
7855
7856         return 0;
7857 }
7858
7859 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7860 {
7861         u32 val;
7862
7863         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7864
7865         /* enable both bits, even on read */
7866         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7867                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7868                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7869 }
7870
7871 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7872 {
7873         u32 val;
7874
7875         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7876
7877         /* disable both bits, even after read */
7878         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7879                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7880                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7881 }
7882
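/* Issue a single dword read through the NVM command interface: clear the
 * DONE bit, program the address, set DOIT and poll for DONE.  The result
 * is converted to big-endian since ethtool treats the buffer as a byte
 * array.
 */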
7883 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7884                                   u32 cmd_flags)
7885 {
7886         int count, i, rc;
7887         u32 val;
7888
7889         /* build the command word */
7890         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7891
7892         /* need to clear DONE bit separately */
7893         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7894
7895         /* address of the NVRAM to read from */
7896         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7897                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7898
7899         /* issue a read command */
7900         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7901
7902         /* adjust timeout for emulation/FPGA */
7903         count = NVRAM_TIMEOUT_COUNT;
7904         if (CHIP_REV_IS_SLOW(bp))
7905                 count *= 100;
7906
7907         /* wait for completion */
7908         *ret_val = 0;
7909         rc = -EBUSY;
7910         for (i = 0; i < count; i++) {
7911                 udelay(5);
7912                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7913
7914                 if (val & MCPR_NVM_COMMAND_DONE) {
7915                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7916                         /* we read nvram data in cpu order
7917                          * but ethtool sees it as an array of bytes;
7918                          * converting to big-endian does the work */
7919                         val = cpu_to_be32(val);
7920                         *ret_val = val;
7921                         rc = 0;
7922                         break;
7923                 }
7924         }
7925
7926         return rc;
7927 }
7928
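/* Read a dword-aligned range from NVRAM.  The FIRST flag marks the
 * opening dword of the burst and LAST the closing one.
 */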
7929 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7930                             int buf_size)
7931 {
7932         int rc;
7933         u32 cmd_flags;
7934         u32 val;
7935
7936         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7937                 DP(BNX2X_MSG_NVM,
7938                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7939                    offset, buf_size);
7940                 return -EINVAL;
7941         }
7942
7943         if (offset + buf_size > bp->common.flash_size) {
7944                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7945                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7946                    offset, buf_size, bp->common.flash_size);
7947                 return -EINVAL;
7948         }
7949
7950         /* request access to nvram interface */
7951         rc = bnx2x_acquire_nvram_lock(bp);
7952         if (rc)
7953                 return rc;
7954
7955         /* enable access to nvram interface */
7956         bnx2x_enable_nvram_access(bp);
7957
7958         /* read the first word(s) */
7959         cmd_flags = MCPR_NVM_COMMAND_FIRST;
7960         while ((buf_size > sizeof(u32)) && (rc == 0)) {
7961                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7962                 memcpy(ret_buf, &val, 4);
7963
7964                 /* advance to the next dword */
7965                 offset += sizeof(u32);
7966                 ret_buf += sizeof(u32);
7967                 buf_size -= sizeof(u32);
7968                 cmd_flags = 0;
7969         }
7970
7971         if (rc == 0) {
7972                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7973                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7974                 memcpy(ret_buf, &val, 4);
7975         }
7976
7977         /* disable access to nvram interface */
7978         bnx2x_disable_nvram_access(bp);
7979         bnx2x_release_nvram_lock(bp);
7980
7981         return rc;
7982 }
7983
7984 static int bnx2x_get_eeprom(struct net_device *dev,
7985                             struct ethtool_eeprom *eeprom, u8 *eebuf)
7986 {
7987         struct bnx2x *bp = netdev_priv(dev);
7988         int rc;
7989
7990         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7991            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
7992            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7993            eeprom->len, eeprom->len);
7994
7995         /* parameters already validated in ethtool_get_eeprom */
7996
7997         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7998
7999         return rc;
8000 }
8001
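/* Write counterpart of bnx2x_nvram_read_dword: program the data and
 * address, set DOIT|WR and poll for DONE.
 */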
8002 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8003                                    u32 cmd_flags)
8004 {
8005         int count, i, rc;
8006
8007         /* build the command word */
8008         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8009
8010         /* need to clear DONE bit separately */
8011         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8012
8013         /* write the data */
8014         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8015
8016         /* address of the NVRAM to write to */
8017         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8018                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8019
8020         /* issue the write command */
8021         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8022
8023         /* adjust timeout for emulation/FPGA */
8024         count = NVRAM_TIMEOUT_COUNT;
8025         if (CHIP_REV_IS_SLOW(bp))
8026                 count *= 100;
8027
8028         /* wait for completion */
8029         rc = -EBUSY;
8030         for (i = 0; i < count; i++) {
8031                 udelay(5);
8032                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8033                 if (val & MCPR_NVM_COMMAND_DONE) {
8034                         rc = 0;
8035                         break;
8036                 }
8037         }
8038
8039         return rc;
8040 }
8041
8042 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8043
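/* Single-byte write (used by ethtool): read the containing dword, splice
 * the new byte in at BYTE_OFFSET and write the dword back.
 */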
8044 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8045                               int buf_size)
8046 {
8047         int rc;
8048         u32 cmd_flags;
8049         u32 align_offset;
8050         u32 val;
8051
8052         if (offset + buf_size > bp->common.flash_size) {
8053                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8054                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8055                    offset, buf_size, bp->common.flash_size);
8056                 return -EINVAL;
8057         }
8058
8059         /* request access to nvram interface */
8060         rc = bnx2x_acquire_nvram_lock(bp);
8061         if (rc)
8062                 return rc;
8063
8064         /* enable access to nvram interface */
8065         bnx2x_enable_nvram_access(bp);
8066
8067         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8068         align_offset = (offset & ~0x03);
8069         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8070
8071         if (rc == 0) {
8072                 val &= ~(0xff << BYTE_OFFSET(offset));
8073                 val |= (*data_buf << BYTE_OFFSET(offset));
8074
8075         /* nvram data is returned as an array of bytes;
8076                  * convert it back to cpu order */
8077                 val = be32_to_cpu(val);
8078
8079                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8080                                              cmd_flags);
8081         }
8082
8083         /* disable access to nvram interface */
8084         bnx2x_disable_nvram_access(bp);
8085         bnx2x_release_nvram_lock(bp);
8086
8087         return rc;
8088 }
8089
8090 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8091                              int buf_size)
8092 {
8093         int rc;
8094         u32 cmd_flags;
8095         u32 val;
8096         u32 written_so_far;
8097
8098         if (buf_size == 1)      /* ethtool */
8099                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8100
8101         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8102                 DP(BNX2X_MSG_NVM,
8103                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8104                    offset, buf_size);
8105                 return -EINVAL;
8106         }
8107
8108         if (offset + buf_size > bp->common.flash_size) {
8109                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8110                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8111                    offset, buf_size, bp->common.flash_size);
8112                 return -EINVAL;
8113         }
8114
8115         /* request access to nvram interface */
8116         rc = bnx2x_acquire_nvram_lock(bp);
8117         if (rc)
8118                 return rc;
8119
8120         /* enable access to nvram interface */
8121         bnx2x_enable_nvram_access(bp);
8122
8123         written_so_far = 0;
8124         cmd_flags = MCPR_NVM_COMMAND_FIRST;
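        /* the loop below brackets each NVRAM page: FIRST is set on the
         * first dword of the buffer or of a page, LAST on the final dword
         * of the buffer or of a page -- presumably so the NVM block knows
         * where each programming burst starts and ends */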
8125         while ((written_so_far < buf_size) && (rc == 0)) {
8126                 if (written_so_far == (buf_size - sizeof(u32)))
8127                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8128                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8129                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8130                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8131                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8132
8133                 memcpy(&val, data_buf, 4);
8134
8135                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8136
8137                 /* advance to the next dword */
8138                 offset += sizeof(u32);
8139                 data_buf += sizeof(u32);
8140                 written_so_far += sizeof(u32);
8141                 cmd_flags = 0;
8142         }
8143
8144         /* disable access to nvram interface */
8145         bnx2x_disable_nvram_access(bp);
8146         bnx2x_release_nvram_lock(bp);
8147
8148         return rc;
8149 }
8150
8151 static int bnx2x_set_eeprom(struct net_device *dev,
8152                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8153 {
8154         struct bnx2x *bp = netdev_priv(dev);
8155         int rc;
8156
8157         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8158            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8159            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8160            eeprom->len, eeprom->len);
8161
8162         /* parameters already validated in ethtool_set_eeprom */
8163
8164         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8165         if (eeprom->magic == 0x00504859)
8166                 if (bp->port.pmf) {
8167
8168                         bnx2x_phy_hw_lock(bp);
8169                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8170                                              bp->link_params.ext_phy_config,
8171                                              (bp->state != BNX2X_STATE_CLOSED),
8172                                              eebuf, eeprom->len);
8173                         if ((bp->state == BNX2X_STATE_OPEN) ||
8174                             (bp->state == BNX2X_STATE_DISABLED)) {
8175                                 rc |= bnx2x_link_reset(&bp->link_params,
8176                                                        &bp->link_vars);
8177                                 rc |= bnx2x_phy_init(&bp->link_params,
8178                                                      &bp->link_vars);
8179                         }
8180                         bnx2x_phy_hw_unlock(bp);
8181
8182                 } else /* Only the PMF can access the PHY */
8183                         return -EINVAL;
8184         else
8185                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8186
8187         return rc;
8188 }
8189
8190 static int bnx2x_get_coalesce(struct net_device *dev,
8191                               struct ethtool_coalesce *coal)
8192 {
8193         struct bnx2x *bp = netdev_priv(dev);
8194
8195         memset(coal, 0, sizeof(struct ethtool_coalesce));
8196
8197         coal->rx_coalesce_usecs = bp->rx_ticks;
8198         coal->tx_coalesce_usecs = bp->tx_ticks;
8199
8200         return 0;
8201 }
8202
8203 static int bnx2x_set_coalesce(struct net_device *dev,
8204                               struct ethtool_coalesce *coal)
8205 {
8206         struct bnx2x *bp = netdev_priv(dev);
8207
8208         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8209         if (bp->rx_ticks > 3000)
8210                 bp->rx_ticks = 3000;
8211
8212         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8213         if (bp->tx_ticks > 3000)
8214                 bp->tx_ticks = 3000;
8215
8216         if (netif_running(dev))
8217                 bnx2x_update_coalesce(bp);
8218
8219         return 0;
8220 }
8221
8222 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8223 {
8224         struct bnx2x *bp = netdev_priv(dev);
8225         int changed = 0;
8226         int rc = 0;
8227
8228         if (data & ETH_FLAG_LRO) {
8229                 if (!(dev->features & NETIF_F_LRO)) {
8230                         dev->features |= NETIF_F_LRO;
8231                         bp->flags |= TPA_ENABLE_FLAG;
8232                         changed = 1;
8233                 }
8234
8235         } else if (dev->features & NETIF_F_LRO) {
8236                 dev->features &= ~NETIF_F_LRO;
8237                 bp->flags &= ~TPA_ENABLE_FLAG;
8238                 changed = 1;
8239         }
8240
8241         if (changed && netif_running(dev)) {
8242                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8243                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8244         }
8245
8246         return rc;
8247 }
8248
8249 static void bnx2x_get_ringparam(struct net_device *dev,
8250                                 struct ethtool_ringparam *ering)
8251 {
8252         struct bnx2x *bp = netdev_priv(dev);
8253
8254         ering->rx_max_pending = MAX_RX_AVAIL;
8255         ering->rx_mini_max_pending = 0;
8256         ering->rx_jumbo_max_pending = 0;
8257
8258         ering->rx_pending = bp->rx_ring_size;
8259         ering->rx_mini_pending = 0;
8260         ering->rx_jumbo_pending = 0;
8261
8262         ering->tx_max_pending = MAX_TX_AVAIL;
8263         ering->tx_pending = bp->tx_ring_size;
8264 }
8265
8266 static int bnx2x_set_ringparam(struct net_device *dev,
8267                                struct ethtool_ringparam *ering)
8268 {
8269         struct bnx2x *bp = netdev_priv(dev);
8270         int rc = 0;
8271
8272         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8273             (ering->tx_pending > MAX_TX_AVAIL) ||
8274             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8275                 return -EINVAL;
8276
8277         bp->rx_ring_size = ering->rx_pending;
8278         bp->tx_ring_size = ering->tx_pending;
8279
8280         if (netif_running(dev)) {
8281                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8282                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8283         }
8284
8285         return rc;
8286 }
8287
8288 static void bnx2x_get_pauseparam(struct net_device *dev,
8289                                  struct ethtool_pauseparam *epause)
8290 {
8291         struct bnx2x *bp = netdev_priv(dev);
8292
8293         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8294                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8295
8296         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8297                             FLOW_CTRL_RX);
8298         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8299                             FLOW_CTRL_TX);
8300
8301         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8302            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8303            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8304 }
8305
8306 static int bnx2x_set_pauseparam(struct net_device *dev,
8307                                 struct ethtool_pauseparam *epause)
8308 {
8309         struct bnx2x *bp = netdev_priv(dev);
8310
8311         if (IS_E1HMF(bp))
8312                 return 0;
8313
8314         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8315            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8316            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8317
8318         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8319
8320         if (epause->rx_pause)
8321                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8322
8323         if (epause->tx_pause)
8324                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8325
8326         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8327                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8328
8329         if (epause->autoneg) {
8330                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8331                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8332                         return -EINVAL;
8333                 }
8334
8335                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8336                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8337         }
8338
8339         DP(NETIF_MSG_LINK,
8340            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8341
8342         if (netif_running(dev)) {
8343                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8344                 bnx2x_link_set(bp);
8345         }
8346
8347         return 0;
8348 }
8349
8350 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8351 {
8352         struct bnx2x *bp = netdev_priv(dev);
8353
8354         return bp->rx_csum;
8355 }
8356
8357 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8358 {
8359         struct bnx2x *bp = netdev_priv(dev);
8360
8361         bp->rx_csum = data;
8362         return 0;
8363 }
8364
8365 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8366 {
8367         if (data) {
8368                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8369                 dev->features |= NETIF_F_TSO6;
8370         } else {
8371                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8372                 dev->features &= ~NETIF_F_TSO6;
8373         }
8374
8375         return 0;
8376 }
8377
8378 static const struct {
8379         char string[ETH_GSTRING_LEN];
8380 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8381         { "register_test (offline)" },
8382         { "memory_test (offline)" },
8383         { "loopback_test (offline)" },
8384         { "nvram_test (online)" },
8385         { "interrupt_test (online)" },
8386         { "link_test (online)" },
8387         { "idle check (online)" },
8388         { "MC errors (online)" }
8389 };
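/* these strings line up, in order, with the buf[] slots filled in by
 * bnx2x_self_test() below: buf[0] registers, buf[1] memory, buf[2]
 * loopback, buf[3] nvram, buf[4] interrupt, buf[5] link and buf[7] MC
 * asserts; note that buf[6] (idle check) is never set on these paths
 */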
8390
8391 static int bnx2x_self_test_count(struct net_device *dev)
8392 {
8393         return BNX2X_NUM_TESTS;
8394 }
8395
8396 static int bnx2x_test_registers(struct bnx2x *bp)
8397 {
8398         int idx, i, rc = -ENODEV;
8399         u32 wr_val = 0;
8400         static const struct {
8401                 u32  offset0;
8402                 u32  offset1;
8403                 u32  mask;
8404         } reg_tbl[] = {
8405 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8406                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8407                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8408                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8409                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8410                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8411                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8412                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8413                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8414                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8415 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8416                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8417                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8418                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8419                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8420                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8421                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8422                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8423                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8424                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8425 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8426                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8427                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8428                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8429                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8430                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8431                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8432                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8433                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8434                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8435 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8436                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8437                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8438                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8439                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8440                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8441                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8442                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8443
8444                 { 0xffffffff, 0, 0x00000000 }
8445         };
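        /* offset1 is the per-port stride: the register actually exercised
         * is offset0 + port * offset1, and mask selects the implemented
         * bits that must read back exactly as written */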
8446
8447         if (!netif_running(bp->dev))
8448                 return rc;
8449
8450         /* Run the test twice:
8451            first by writing 0x00000000, then by writing 0xffffffff */
8452         for (idx = 0; idx < 2; idx++) {
8453
8454                 switch (idx) {
8455                 case 0:
8456                         wr_val = 0;
8457                         break;
8458                 case 1:
8459                         wr_val = 0xffffffff;
8460                         break;
8461                 }
8462
8463                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8464                         u32 offset, mask, save_val, val;
8465                         int port = BP_PORT(bp);
8466
8467                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8468                         mask = reg_tbl[i].mask;
8469
8470                         save_val = REG_RD(bp, offset);
8471
8472                         REG_WR(bp, offset, wr_val);
8473                         val = REG_RD(bp, offset);
8474
8475                         /* Restore the original register's value */
8476                         REG_WR(bp, offset, save_val);
8477
8478                         /* verify the value is as expected */
8479                         if ((val & mask) != (wr_val & mask))
8480                                 goto test_reg_exit;
8481                 }
8482         }
8483
8484         rc = 0;
8485
8486 test_reg_exit:
8487         return rc;
8488 }
8489
8490 static int bnx2x_test_memory(struct bnx2x *bp)
8491 {
8492         int i, j, rc = -ENODEV;
8493         u32 val;
8494         static const struct {
8495                 u32 offset;
8496                 int size;
8497         } mem_tbl[] = {
8498                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8499                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8500                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8501                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8502                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8503                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8504                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8505
8506                 { 0xffffffff, 0 }
8507         };
8508         static const struct {
8509                 char *name;
8510                 u32 offset;
8511                 u32 mask;
8512         } prty_tbl[] = {
8513                 { "CCM_REG_CCM_PRTY_STS",     CCM_REG_CCM_PRTY_STS,     0 },
8514                 { "CFC_REG_CFC_PRTY_STS",     CFC_REG_CFC_PRTY_STS,     0 },
8515                 { "DMAE_REG_DMAE_PRTY_STS",   DMAE_REG_DMAE_PRTY_STS,   0 },
8516                 { "TCM_REG_TCM_PRTY_STS",     TCM_REG_TCM_PRTY_STS,     0 },
8517                 { "UCM_REG_UCM_PRTY_STS",     UCM_REG_UCM_PRTY_STS,     0 },
8518                 { "XCM_REG_XCM_PRTY_STS",     XCM_REG_XCM_PRTY_STS,     0x1 },
8519
8520                 { NULL, 0xffffffff, 0 }
8521         };
8522
8523         if (!netif_running(bp->dev))
8524                 return rc;
8525
8526         /* Go through all the memories */
8527         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8528                 for (j = 0; j < mem_tbl[i].size; j++)
8529                         REG_RD(bp, mem_tbl[i].offset + j*4);
8530
8531         /* Check the parity status */
8532         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8533                 val = REG_RD(bp, prty_tbl[i].offset);
8534                 if (val & ~(prty_tbl[i].mask)) {
8535                         DP(NETIF_MSG_HW,
8536                            "%s is 0x%x\n", prty_tbl[i].name, val);
8537                         goto test_mem_exit;
8538                 }
8539         }
8540
8541         rc = 0;
8542
8543 test_mem_exit:
8544         return rc;
8545 }
8546
8547 static void bnx2x_netif_start(struct bnx2x *bp)
8548 {
8549         int i;
8550
8551         if (atomic_dec_and_test(&bp->intr_sem)) {
8552                 if (netif_running(bp->dev)) {
8553                         bnx2x_int_enable(bp);
8554                         for_each_queue(bp, i)
8555                                 napi_enable(&bnx2x_fp(bp, i, napi));
8556                         if (bp->state == BNX2X_STATE_OPEN)
8557                                 netif_wake_queue(bp->dev);
8558                 }
8559         }
8560 }
8561
8562 static void bnx2x_netif_stop(struct bnx2x *bp)
8563 {
8564         int i;
8565
8566         if (netif_running(bp->dev)) {
8567                 netif_tx_disable(bp->dev);
8568                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8569                 for_each_queue(bp, i)
8570                         napi_disable(&bnx2x_fp(bp, i, napi));
8571         }
8572         bnx2x_int_disable_sync(bp);
8573 }
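/* bnx2x_netif_stop()/bnx2x_netif_start() form a bracket: stop disables
 * the tx queue and NAPI and (via bnx2x_int_disable_sync(), which appears
 * to raise bp->intr_sem) masks interrupts; start only re-enables
 * everything once its atomic_dec_and_test() brings intr_sem back to zero,
 * so nested stops are tolerated
 */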
8574
8575 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8576 {
8577         int cnt = 1000;
8578
8579         if (link_up)
8580                 while (bnx2x_link_test(bp) && cnt--)
8581                         msleep(10);
8582 }
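/* bnx2x_link_test() returns non-zero while the link is still down, so
 * bnx2x_wait_for_link() polls every 10ms for up to ~10 seconds, and only
 * when the link was up before the test started
 */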
8583
8584 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8585 {
8586         unsigned int pkt_size, num_pkts, i;
8587         struct sk_buff *skb;
8588         unsigned char *packet;
8589         struct bnx2x_fastpath *fp = &bp->fp[0];
8590         u16 tx_start_idx, tx_idx;
8591         u16 rx_start_idx, rx_idx;
8592         u16 pkt_prod;
8593         struct sw_tx_bd *tx_buf;
8594         struct eth_tx_bd *tx_bd;
8595         dma_addr_t mapping;
8596         union eth_rx_cqe *cqe;
8597         u8 cqe_fp_flags;
8598         struct sw_rx_bd *rx_buf;
8599         u16 len;
8600         int rc = -ENODEV;
8601
8602         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8603                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8604                 bnx2x_phy_hw_lock(bp);
8605                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8606                 bnx2x_phy_hw_unlock(bp);
8607
8608         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8609                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8610                 bnx2x_phy_hw_lock(bp);
8611                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8612                 bnx2x_phy_hw_unlock(bp);
8613                 /* wait until link state is restored */
8614                 bnx2x_wait_for_link(bp, link_up);
8615
8616         } else
8617                 return -EINVAL;
8618
8619         pkt_size = 1514;
8620         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8621         if (!skb) {
8622                 rc = -ENOMEM;
8623                 goto test_loopback_exit;
8624         }
8625         packet = skb_put(skb, pkt_size);
8626         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8627         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8628         for (i = ETH_HLEN; i < pkt_size; i++)
8629                 packet[i] = (unsigned char) (i & 0xff);
8630
8631         num_pkts = 0;
8632         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8633         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8634
8635         pkt_prod = fp->tx_pkt_prod++;
8636         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8637         tx_buf->first_bd = fp->tx_bd_prod;
8638         tx_buf->skb = skb;
8639
8640         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8641         mapping = pci_map_single(bp->pdev, skb->data,
8642                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8643         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8644         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8645         tx_bd->nbd = cpu_to_le16(1);
8646         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8647         tx_bd->vlan = cpu_to_le16(pkt_prod);
8648         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8649                                        ETH_TX_BD_FLAGS_END_BD);
8650         tx_bd->general_data = ((UNICAST_ADDRESS <<
8651                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8652
8653         fp->hw_tx_prods->bds_prod =
8654                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8655         mb(); /* FW restriction: must not reorder writing nbd and packets */
8656         fp->hw_tx_prods->packets_prod =
8657                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8658         DOORBELL(bp, FP_IDX(fp), 0);
8659
8660         mmiowb();
8661
8662         num_pkts++;
8663         fp->tx_bd_prod++;
8664         bp->dev->trans_start = jiffies;
8665
8666         udelay(100);
8667
8668         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8669         if (tx_idx != tx_start_idx + num_pkts)
8670                 goto test_loopback_exit;
8671
8672         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8673         if (rx_idx != rx_start_idx + num_pkts)
8674                 goto test_loopback_exit;
8675
8676         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8677         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8678         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8679                 goto test_loopback_rx_exit;
8680
8681         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8682         if (len != pkt_size)
8683                 goto test_loopback_rx_exit;
8684
8685         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8686         skb = rx_buf->skb;
8687         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8688         for (i = ETH_HLEN; i < pkt_size; i++)
8689                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8690                         goto test_loopback_rx_exit;
8691
8692         rc = 0;
8693
8694 test_loopback_rx_exit:
8695         bp->dev->last_rx = jiffies;
8696
8697         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8698         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8699         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8700         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8701
8702         /* Update producers */
8703         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8704                              fp->rx_sge_prod);
8705         mmiowb(); /* keep prod updates ordered */
8706
8707 test_loopback_exit:
8708         bp->link_params.loopback_mode = LOOPBACK_NONE;
8709
8710         return rc;
8711 }
8712
8713 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8714 {
8715         int rc = 0;
8716
8717         if (!netif_running(bp->dev))
8718                 return BNX2X_LOOPBACK_FAILED;
8719
8720         bnx2x_netif_stop(bp);
8721
8722         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8723                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8724                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8725         }
8726
8727         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8728                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8729                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8730         }
8731
8732         bnx2x_netif_start(bp);
8733
8734         return rc;
8735 }
8736
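/* the CRC-32 of a data block that ends with its own little-endian CRC
 * always equals this residual constant, so each nvram_tbl region below
 * can be verified without knowing the individual checksum stored in it
 */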
8737 #define CRC32_RESIDUAL                  0xdebb20e3
8738
8739 static int bnx2x_test_nvram(struct bnx2x *bp)
8740 {
8741         static const struct {
8742                 int offset;
8743                 int size;
8744         } nvram_tbl[] = {
8745                 {     0,  0x14 }, /* bootstrap */
8746                 {  0x14,  0xec }, /* dir */
8747                 { 0x100, 0x350 }, /* manuf_info */
8748                 { 0x450,  0xf0 }, /* feature_info */
8749                 { 0x640,  0x64 }, /* upgrade_key_info */
8750                 { 0x6a4,  0x64 },
8751                 { 0x708,  0x70 }, /* manuf_key_info */
8752                 { 0x778,  0x70 },
8753                 {     0,     0 }
8754         };
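        /* 0x350 is the size of the largest region in nvram_tbl
         * (manuf_info), so a single buffer serves every read below */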
8755         u32 buf[0x350 / 4];
8756         u8 *data = (u8 *)buf;
8757         int i, rc;
8758         u32 magic, csum;
8759
8760         rc = bnx2x_nvram_read(bp, 0, data, 4);
8761         if (rc) {
8762                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8763                 goto test_nvram_exit;
8764         }
8765
8766         magic = be32_to_cpu(buf[0]);
8767         if (magic != 0x669955aa) {
8768                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8769                 rc = -ENODEV;
8770                 goto test_nvram_exit;
8771         }
8772
8773         for (i = 0; nvram_tbl[i].size; i++) {
8774
8775                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8776                                       nvram_tbl[i].size);
8777                 if (rc) {
8778                         DP(NETIF_MSG_PROBE,
8779                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8780                         goto test_nvram_exit;
8781                 }
8782
8783                 csum = ether_crc_le(nvram_tbl[i].size, data);
8784                 if (csum != CRC32_RESIDUAL) {
8785                         DP(NETIF_MSG_PROBE,
8786                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8787                         rc = -ENODEV;
8788                         goto test_nvram_exit;
8789                 }
8790         }
8791
8792 test_nvram_exit:
8793         return rc;
8794 }
8795
8796 static int bnx2x_test_intr(struct bnx2x *bp)
8797 {
8798         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8799         int i, rc;
8800
8801         if (!netif_running(bp->dev))
8802                 return -ENODEV;
8803
8804         config->hdr.length_6b = 0;
8805         config->hdr.offset = 0;
8806         config->hdr.client_id = BP_CL_ID(bp);
8807         config->hdr.reserved1 = 0;
8808
8809         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8810                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8811                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8812         if (rc == 0) {
8813                 bp->set_mac_pending++;
8814                 for (i = 0; i < 10; i++) {
8815                         if (!bp->set_mac_pending)
8816                                 break;
8817                         msleep_interruptible(10);
8818                 }
8819                 if (i == 10)
8820                         rc = -ENODEV;
8821         }
8822
8823         return rc;
8824 }
8825
8826 static void bnx2x_self_test(struct net_device *dev,
8827                             struct ethtool_test *etest, u64 *buf)
8828 {
8829         struct bnx2x *bp = netdev_priv(dev);
8830
8831         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8832
8833         if (!netif_running(dev))
8834                 return;
8835
8836         /* offline tests are not supported in MF mode */
8837         if (IS_E1HMF(bp))
8838                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8839
8840         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8841                 u8 link_up;
8842
8843                 link_up = bp->link_vars.link_up;
8844                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8845                 bnx2x_nic_load(bp, LOAD_DIAG);
8846                 /* wait until link state is restored */
8847                 bnx2x_wait_for_link(bp, link_up);
8848
8849                 if (bnx2x_test_registers(bp) != 0) {
8850                         buf[0] = 1;
8851                         etest->flags |= ETH_TEST_FL_FAILED;
8852                 }
8853                 if (bnx2x_test_memory(bp) != 0) {
8854                         buf[1] = 1;
8855                         etest->flags |= ETH_TEST_FL_FAILED;
8856                 }
8857                 buf[2] = bnx2x_test_loopback(bp, link_up);
8858                 if (buf[2] != 0)
8859                         etest->flags |= ETH_TEST_FL_FAILED;
8860
8861                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8862                 bnx2x_nic_load(bp, LOAD_NORMAL);
8863                 /* wait until link state is restored */
8864                 bnx2x_wait_for_link(bp, link_up);
8865         }
8866         if (bnx2x_test_nvram(bp) != 0) {
8867                 buf[3] = 1;
8868                 etest->flags |= ETH_TEST_FL_FAILED;
8869         }
8870         if (bnx2x_test_intr(bp) != 0) {
8871                 buf[4] = 1;
8872                 etest->flags |= ETH_TEST_FL_FAILED;
8873         }
8874         if (bp->port.pmf)
8875                 if (bnx2x_link_test(bp) != 0) {
8876                         buf[5] = 1;
8877                         etest->flags |= ETH_TEST_FL_FAILED;
8878                 }
8879         buf[7] = bnx2x_mc_assert(bp);
8880         if (buf[7] != 0)
8881                 etest->flags |= ETH_TEST_FL_FAILED;
8882
8883 #ifdef BNX2X_EXTRA_DEBUG
8884         bnx2x_panic_dump(bp);
8885 #endif
8886 }
8887
8888 static const struct {
8889         long offset;
8890         int size;
8891         u32 flags;
8892 #define STATS_FLAGS_PORT                1
8893 #define STATS_FLAGS_FUNC                2
8894         u8 string[ETH_GSTRING_LEN];
8895 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8896 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8897                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8898         { STATS_OFFSET32(error_bytes_received_hi),
8899                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8900         { STATS_OFFSET32(total_bytes_transmitted_hi),
8901                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8902         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8903                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8904         { STATS_OFFSET32(total_unicast_packets_received_hi),
8905                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8906         { STATS_OFFSET32(total_multicast_packets_received_hi),
8907                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8908         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8909                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8910         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8911                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8912         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8913                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8914 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8915                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8916         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8917                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8918         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8919                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8920         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8921                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8922         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8923                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8924         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8925                                 8, STATS_FLAGS_PORT, "tx_deferred" },
8926         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8927                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8928         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8929                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8930         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8931                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8932         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8933                                 8, STATS_FLAGS_PORT, "rx_fragments" },
8934 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8935                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
8936         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8937                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8938         { STATS_OFFSET32(jabber_packets_received),
8939                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8940         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8941                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8942         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8943                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8944         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8945                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8946         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8947                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8948         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8949                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8950         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8951                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8952         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8953                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8954 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8955                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8956         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8957                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8958         { STATS_OFFSET32(tx_stat_outxonsent_hi),
8959                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
8960         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
8961                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8962         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8963                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8964         { STATS_OFFSET32(mac_filter_discard),
8965                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8966         { STATS_OFFSET32(no_buff_discard),
8967                                 4, STATS_FLAGS_FUNC, "rx_discards" },
8968         { STATS_OFFSET32(xxoverflow_discard),
8969                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
8970         { STATS_OFFSET32(brb_drop_hi),
8971                                 8, STATS_FLAGS_PORT, "brb_discard" },
8972         { STATS_OFFSET32(brb_truncate_hi),
8973                                 8, STATS_FLAGS_PORT, "brb_truncate" },
8974 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
8975                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
8976         { STATS_OFFSET32(rx_skb_alloc_failed),
8977                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
8978 /* 42 */{ STATS_OFFSET32(hw_csum_err),
8979                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8980 };
8981
8982 #define IS_NOT_E1HMF_STAT(bp, i) \
8983                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
8984
8985 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8986 {
8987         struct bnx2x *bp = netdev_priv(dev);
8988         int i, j;
8989
8990         switch (stringset) {
8991         case ETH_SS_STATS:
8992                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8993                         if (IS_NOT_E1HMF_STAT(bp, i))
8994                                 continue;
8995                         strcpy(buf + j*ETH_GSTRING_LEN,
8996                                bnx2x_stats_arr[i].string);
8997                         j++;
8998                 }
8999                 break;
9000
9001         case ETH_SS_TEST:
9002                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9003                 break;
9004         }
9005 }
9006
9007 static int bnx2x_get_stats_count(struct net_device *dev)
9008 {
9009         struct bnx2x *bp = netdev_priv(dev);
9010         int i, num_stats = 0;
9011
9012         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9013                 if (IS_NOT_E1HMF_STAT(bp, i))
9014                         continue;
9015                 num_stats++;
9016         }
9017         return num_stats;
9018 }
9019
9020 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9021                                     struct ethtool_stats *stats, u64 *buf)
9022 {
9023         struct bnx2x *bp = netdev_priv(dev);
9024         u32 *hw_stats = (u32 *)&bp->eth_stats;
9025         int i, j;
9026
9027         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9028                 if (IS_NOT_E1HMF_STAT(bp, i))
9029                         continue;
9030
9031                 if (bnx2x_stats_arr[i].size == 0) {
9032                         /* skip this counter */
9033                         buf[j] = 0;
9034                         j++;
9035                         continue;
9036                 }
9037                 if (bnx2x_stats_arr[i].size == 4) {
9038                         /* 4-byte counter */
9039                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9040                         j++;
9041                         continue;
9042                 }
9043                 /* 8-byte counter */
9044                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9045                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9046                 j++;
9047         }
9048 }
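/* 8-byte counters live in bp->eth_stats as two consecutive u32s with the
 * most significant word first (the _hi suffix in bnx2x_stats_arr), which
 * is why HILO_U64() above merges offset and offset + 1
 */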
9049
9050 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9051 {
9052         struct bnx2x *bp = netdev_priv(dev);
9053         int port = BP_PORT(bp);
9054         int i;
9055
9056         if (!netif_running(dev))
9057                 return 0;
9058
9059         if (!bp->port.pmf)
9060                 return 0;
9061
9062         if (data == 0)
9063                 data = 2;
9064
9065         for (i = 0; i < (data * 2); i++) {
9066                 if ((i % 2) == 0)
9067                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9068                                       bp->link_params.hw_led_mode,
9069                                       bp->link_params.chip_id);
9070                 else
9071                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9072                                       bp->link_params.hw_led_mode,
9073                                       bp->link_params.chip_id);
9074
9075                 msleep_interruptible(500);
9076                 if (signal_pending(current))
9077                         break;
9078         }
9079
9080         if (bp->link_vars.link_up)
9081                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9082                               bp->link_vars.line_speed,
9083                               bp->link_params.hw_led_mode,
9084                               bp->link_params.chip_id);
9085
9086         return 0;
9087 }
9088
9089 static struct ethtool_ops bnx2x_ethtool_ops = {
9090         .get_settings           = bnx2x_get_settings,
9091         .set_settings           = bnx2x_set_settings,
9092         .get_drvinfo            = bnx2x_get_drvinfo,
9093         .get_wol                = bnx2x_get_wol,
9094         .set_wol                = bnx2x_set_wol,
9095         .get_msglevel           = bnx2x_get_msglevel,
9096         .set_msglevel           = bnx2x_set_msglevel,
9097         .nway_reset             = bnx2x_nway_reset,
9098         .get_link               = ethtool_op_get_link,
9099         .get_eeprom_len         = bnx2x_get_eeprom_len,
9100         .get_eeprom             = bnx2x_get_eeprom,
9101         .set_eeprom             = bnx2x_set_eeprom,
9102         .get_coalesce           = bnx2x_get_coalesce,
9103         .set_coalesce           = bnx2x_set_coalesce,
9104         .get_ringparam          = bnx2x_get_ringparam,
9105         .set_ringparam          = bnx2x_set_ringparam,
9106         .get_pauseparam         = bnx2x_get_pauseparam,
9107         .set_pauseparam         = bnx2x_set_pauseparam,
9108         .get_rx_csum            = bnx2x_get_rx_csum,
9109         .set_rx_csum            = bnx2x_set_rx_csum,
9110         .get_tx_csum            = ethtool_op_get_tx_csum,
9111         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9112         .set_flags              = bnx2x_set_flags,
9113         .get_flags              = ethtool_op_get_flags,
9114         .get_sg                 = ethtool_op_get_sg,
9115         .set_sg                 = ethtool_op_set_sg,
9116         .get_tso                = ethtool_op_get_tso,
9117         .set_tso                = bnx2x_set_tso,
9118         .self_test_count        = bnx2x_self_test_count,
9119         .self_test              = bnx2x_self_test,
9120         .get_strings            = bnx2x_get_strings,
9121         .phys_id                = bnx2x_phys_id,
9122         .get_stats_count        = bnx2x_get_stats_count,
9123         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9124 };
9125
9126 /* end of ethtool_ops */
9127
9128 /****************************************************************************
9129 * General service functions
9130 ****************************************************************************/
9131
9132 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9133 {
9134         u16 pmcsr;
9135
9136         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9137
9138         switch (state) {
9139         case PCI_D0:
9140                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9141                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9142                                        PCI_PM_CTRL_PME_STATUS));
9143
9144                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9145                         /* delay required during transition out of D3hot */
9146                         msleep(20);
9147                 break;
9148
9149         case PCI_D3hot:
9150                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9151                 pmcsr |= 3;
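                /* 3 in the PCI_PM_CTRL_STATE_MASK field selects D3hot */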
9152
9153                 if (bp->wol)
9154                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9155
9156                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9157                                       pmcsr);
9158
9159                 /* No more memory access after this point until
9160                  * device is brought back to D0.
9161                  */
9162                 break;
9163
9164         default:
9165                 return -EINVAL;
9166         }
9167         return 0;
9168 }
9169
9170 /*
9171  * net_device service functions
9172  */
9173
9174 static int bnx2x_poll(struct napi_struct *napi, int budget)
9175 {
9176         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9177                                                  napi);
9178         struct bnx2x *bp = fp->bp;
9179         int work_done = 0;
9180
9181 #ifdef BNX2X_STOP_ON_ERROR
9182         if (unlikely(bp->panic))
9183                 goto poll_panic;
9184 #endif
9185
9186         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9187         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9188         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9189
9190         bnx2x_update_fpsb_idx(fp);
9191
9192         if (BNX2X_HAS_TX_WORK(fp))
9193                 bnx2x_tx_int(fp, budget);
9194
9195         if (BNX2X_HAS_RX_WORK(fp))
9196                 work_done = bnx2x_rx_int(fp, budget);
9197
9198         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9199
9200         /* must not complete if we consumed full budget */
9201         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9202
9203 #ifdef BNX2X_STOP_ON_ERROR
9204 poll_panic:
9205 #endif
9206                 netif_rx_complete(bp->dev, napi);
9207
9208                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9209                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9210                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9211                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9212         }
9213         return work_done;
9214 }
9215
9216
9217 /* we split the first BD into a header BD and a data BD
9218  * to ease the pain of our fellow microcode engineers;
9219  * we use one mapping for both BDs.
9220  * So far this has only been observed to happen
9221  * in Other Operating Systems(TM)
9222  */
9223 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9224                                    struct bnx2x_fastpath *fp,
9225                                    struct eth_tx_bd **tx_bd, u16 hlen,
9226                                    u16 bd_prod, int nbd)
9227 {
9228         struct eth_tx_bd *h_tx_bd = *tx_bd;
9229         struct eth_tx_bd *d_tx_bd;
9230         dma_addr_t mapping;
9231         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9232
9233         /* first fix first BD */
9234         h_tx_bd->nbd = cpu_to_le16(nbd);
9235         h_tx_bd->nbytes = cpu_to_le16(hlen);
9236
9237         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9238            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9239            h_tx_bd->addr_lo, h_tx_bd->nbd);
9240
9241         /* now get a new data BD
9242          * (after the pbd) and fill it */
9243         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9244         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9245
9246         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9247                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9248
9249         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9250         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9251         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9252         d_tx_bd->vlan = 0;
9253         /* this marks the BD as one that has no individual mapping
9254          * the FW ignores this flag in a BD not marked start
9255          */
9256         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9257         DP(NETIF_MSG_TX_QUEUED,
9258            "TSO split data size is %d (%x:%x)\n",
9259            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9260
9261         /* update tx_bd for marking the last BD flag */
9262         *tx_bd = d_tx_bd;
9263
9264         return bd_prod;
9265 }
9266
9267 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9268 {
9269         if (fix > 0)
9270                 csum = (u16) ~csum_fold(csum_sub(csum,
9271                                 csum_partial(t_header - fix, fix, 0)));
9272
9273         else if (fix < 0)
9274                 csum = (u16) ~csum_fold(csum_add(csum,
9275                                 csum_partial(t_header, -fix, 0)));
9276
9277         return swab16(csum);
9278 }
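/* a CHECKSUM_PARTIAL sum starts at the stack's csum_start, which need not
 * coincide with the transport header the hardware sums from; 'fix' is the
 * signed byte distance between the two, so the partial sum over that gap
 * is added or subtracted and the result byte-swapped, presumably into the
 * order the parsing BD expects
 */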
9279
9280 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9281 {
9282         u32 rc;
9283
9284         if (skb->ip_summed != CHECKSUM_PARTIAL)
9285                 rc = XMIT_PLAIN;
9286
9287         else {
9288                 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9289                         rc = XMIT_CSUM_V6;
9290                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9291                                 rc |= XMIT_CSUM_TCP;
9292
9293                 } else {
9294                         rc = XMIT_CSUM_V4;
9295                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9296                                 rc |= XMIT_CSUM_TCP;
9297                 }
9298         }
9299
9300         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9301                 rc |= XMIT_GSO_V4;
9302
9303         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9304                 rc |= XMIT_GSO_V6;
9305
9306         return rc;
9307 }
9308
9309 /* check if packet requires linearization (packet is too fragmented) */
9310 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9311                              u32 xmit_type)
9312 {
9313         int to_copy = 0;
9314         int hlen = 0;
9315         int first_bd_sz = 0;
9316
9317         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9318         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9319
9320                 if (xmit_type & XMIT_GSO) {
9321                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9322                         /* Check if LSO packet needs to be copied:
9323                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9324                         int wnd_size = MAX_FETCH_BD - 3;
9325                         /* Number of windows to check */
9326                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9327                         int wnd_idx = 0;
9328                         int frag_idx = 0;
9329                         u32 wnd_sum = 0;
9330
9331                         /* Headers length */
9332                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9333                                 tcp_hdrlen(skb);
9334
9335                         /* Amount of data (w/o headers) on the linear part of the SKB */
9336                         first_bd_sz = skb_headlen(skb) - hlen;
9337
9338                         wnd_sum  = first_bd_sz;
9339
9340                         /* Calculate the first sum - it's special */
9341                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9342                                 wnd_sum +=
9343                                         skb_shinfo(skb)->frags[frag_idx].size;
9344
9345                         /* If there was data on linear skb data - check it */
9346                         if (first_bd_sz > 0) {
9347                                 if (unlikely(wnd_sum < lso_mss)) {
9348                                         to_copy = 1;
9349                                         goto exit_lbl;
9350                                 }
9351
9352                                 wnd_sum -= first_bd_sz;
9353                         }
9354
9355                         /* Others are easier: run through the frag list and
9356                            check all windows */
9357                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9358                                 wnd_sum +=
9359                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9360
9361                                 if (unlikely(wnd_sum < lso_mss)) {
9362                                         to_copy = 1;
9363                                         break;
9364                                 }
9365                                 wnd_sum -=
9366                                         skb_shinfo(skb)->frags[wnd_idx].size;
9367                         }
9368
9369                 } else {
9370                         /* a non-LSO packet this fragmented must always
9371                            be linearized */
9372                         to_copy = 1;
9373                 }
9374         }
9375
9376 exit_lbl:
9377         if (unlikely(to_copy))
9378                 DP(NETIF_MSG_TX_QUEUED,
9379                    "Linearization IS REQUIRED for %s packet. "
9380                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9381                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9382                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9383
9384         return to_copy;
9385 }
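/* a worked example with illustrative numbers: if MAX_FETCH_BD were 13 the
 * window would be 10 BDs, so for an LSO skb with gso_size 1460 every run
 * of 10 consecutive BDs (linear part plus frags) must carry at least
 * 1460 bytes; any window short of one MSS forces skb_linearize() in
 * bnx2x_start_xmit() below
 */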
9386
9387 /* called with netif_tx_lock
9388  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9389  * netif_wake_queue()
9390  */
9391 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9392 {
9393         struct bnx2x *bp = netdev_priv(dev);
9394         struct bnx2x_fastpath *fp;
9395         struct sw_tx_bd *tx_buf;
9396         struct eth_tx_bd *tx_bd;
9397         struct eth_tx_parse_bd *pbd = NULL;
9398         u16 pkt_prod, bd_prod;
9399         int nbd, fp_index;
9400         dma_addr_t mapping;
9401         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9402         int vlan_off = (bp->e1hov ? 4 : 0);
9403         int i;
9404         u8 hlen = 0;
9405
9406 #ifdef BNX2X_STOP_ON_ERROR
9407         if (unlikely(bp->panic))
9408                 return NETDEV_TX_BUSY;
9409 #endif
9410
9411         fp_index = (smp_processor_id() % bp->num_queues);
9412         fp = &bp->fp[fp_index];
9413
9414         if (unlikely(bnx2x_tx_avail(fp) <
9415                                         (skb_shinfo(skb)->nr_frags + 3))) {
9416                 bp->eth_stats.driver_xoff++;
9417                 netif_stop_queue(dev);
9418                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9419                 return NETDEV_TX_BUSY;
9420         }
9421
9422         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9423            "  gso type %x  xmit_type %x\n",
9424            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9425            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9426
9427         /* First, check if we need to linearize the skb
9428            (due to FW restrictions) */
9429         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9430                 /* Statistics of linearization */
9431                 bp->lin_cnt++;
9432                 if (skb_linearize(skb) != 0) {
9433                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9434                            "silently dropping this SKB\n");
9435                         dev_kfree_skb_any(skb);
9436                         return NETDEV_TX_OK;
9437                 }
9438         }
9439
9440         /*
9441         Please read carefully. First we use one BD which we mark as start,
9442         then for TSO or checksum offload we have a parsing-info BD,
9443         and only then the rest of the data BDs.
9444         (don't forget to mark the last one as last,
9445         and to unmap only AFTER you write to the BD ...)
9446         And above all, all PBD sizes are in 16-bit words - NOT DWORDS!
9447         */
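        /* Rough sketch of the BD chain built below (illustrative):
         *
         *   [start BD: headers] -> [parse BD, csum/TSO only] ->
         *   [frag BD] ... [frag BD, marked as last]
         *
         * which is why nbd further down is nr_frags + 1, plus one more
         * when a parse BD is used */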
9448
9449         pkt_prod = fp->tx_pkt_prod++;
9450         bd_prod = TX_BD(fp->tx_bd_prod);
9451
9452         /* get a tx_buf and first BD */
9453         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9454         tx_bd = &fp->tx_desc_ring[bd_prod];
9455
9456         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9457         tx_bd->general_data = (UNICAST_ADDRESS <<
9458                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9459         tx_bd->general_data |= 1; /* header nbd */
9460
9461         /* remember the first BD of the packet */
9462         tx_buf->first_bd = fp->tx_bd_prod;
9463         tx_buf->skb = skb;
9464
9465         DP(NETIF_MSG_TX_QUEUED,
9466            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9467            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9468
9469         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9470                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9471                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9472                 vlan_off += 4;
9473         } else
9474                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9475
9476         if (xmit_type) {
9477
9478                 /* turn on parsing and get a BD */
9479                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9480                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9481
9482                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9483         }
9484
9485         if (xmit_type & XMIT_CSUM) {
9486                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9487
9488                 /* for now NS flag is not used in Linux */
9489                 pbd->global_data = (hlen |
9490                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9491                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9492
9493                 pbd->ip_hlen = (skb_transport_header(skb) -
9494                                 skb_network_header(skb)) / 2;
9495
9496                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9497
9498                 pbd->total_hlen = cpu_to_le16(hlen);
9499                 hlen = hlen*2 - vlan_off;
9500
9501                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9502
9503                 if (xmit_type & XMIT_CSUM_V4)
9504                         tx_bd->bd_flags.as_bitfield |=
9505                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9506                 else
9507                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9508
9509                 if (xmit_type & XMIT_CSUM_TCP) {
9510                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9511
9512                 } else {
9513                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9514
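                        /* non-TCP (i.e. UDP) checksum offload: adjust the
                         * pseudo checksum by the signed offset between where
                         * the stack stores the checksum and where the HW
                         * expects it - presumably the UDP CSUM errata
                         * workaround credited in the file header */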
9515                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9516                         pbd->cs_offset = fix / 2;
9517
9518                         DP(NETIF_MSG_TX_QUEUED,
9519                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9520                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9521                            SKB_CS(skb));
9522
9523                         /* HW bug: fixup the CSUM */
9524                         pbd->tcp_pseudo_csum =
9525                                 bnx2x_csum_fix(skb_transport_header(skb),
9526                                                SKB_CS(skb), fix);
9527
9528                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9529                            pbd->tcp_pseudo_csum);
9530                 }
9531         }
9532
9533         mapping = pci_map_single(bp->pdev, skb->data,
9534                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9535
9536         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9537         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
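        /* nbd = one BD per frag, plus the start BD, plus the parse BD
         * when checksum/TSO parsing info was attached above */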
9538         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9539         tx_bd->nbd = cpu_to_le16(nbd);
9540         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9541
9542         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9543            "  nbytes %d  flags %x  vlan %x\n",
9544            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9545            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9546            le16_to_cpu(tx_bd->vlan));
9547
9548         if (xmit_type & XMIT_GSO) {
9549
9550                 DP(NETIF_MSG_TX_QUEUED,
9551                    "TSO packet len %d  hlen %d  headlen %d  tso size %d\n",
9552                    skb->len, hlen, skb_headlen(skb),
9553                    skb_shinfo(skb)->gso_size);
9554
9555                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9556
9557                 if (unlikely(skb_headlen(skb) > hlen))
9558                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9559                                                  bd_prod, ++nbd);
9560
9561                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9562                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9563                 pbd->tcp_flags = pbd_tcp_flags(skb);
9564
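                /* seed the pseudo-header checksum with a zero length; the HW
                 * adds each segment's real length itself, matching the
                 * PSEUDO_CS_WITHOUT_LEN flag set below */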
9565                 if (xmit_type & XMIT_GSO_V4) {
9566                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9567                         pbd->tcp_pseudo_csum =
9568                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9569                                                           ip_hdr(skb)->daddr,
9570                                                           0, IPPROTO_TCP, 0));
9571
9572                 } else
9573                         pbd->tcp_pseudo_csum =
9574                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9575                                                         &ipv6_hdr(skb)->daddr,
9576                                                         0, IPPROTO_TCP, 0));
9577
9578                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9579         }
9580
9581         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9582                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9583
9584                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9585                 tx_bd = &fp->tx_desc_ring[bd_prod];
9586
9587                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9588                                        frag->size, PCI_DMA_TODEVICE);
9589
9590                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9591                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9592                 tx_bd->nbytes = cpu_to_le16(frag->size);
9593                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9594                 tx_bd->bd_flags.as_bitfield = 0;
9595
9596                 DP(NETIF_MSG_TX_QUEUED,
9597                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9598                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9599                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9600         }
9601
9602         /* now at last mark the BD as the last BD */
9603         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9604
9605         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9606            tx_bd, tx_bd->bd_flags.as_bitfield);
9607
9608         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9609
9610         /* now send a tx doorbell, counting the reserved next-page BD
9611          * if the packet's BD chain crossed a BD-page boundary
9612          */
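        /* e.g. (hypothetical): nbd = 5 with bd_prod at page offset 2 means
         * the chain wrapped through the end of a BD page and consumed the
         * reserved next-page pointer BD, which the HW must count as well */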
9613         if (TX_BD_POFF(bd_prod) < nbd)
9614                 nbd++;
9615
9616         if (pbd)
9617                 DP(NETIF_MSG_TX_QUEUED,
9618                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9619                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9620                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9621                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9622                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9623
9624         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9625
9626         fp->hw_tx_prods->bds_prod =
9627                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9628         mb(); /* FW restriction: must not reorder writing nbd and packets */
9629         fp->hw_tx_prods->packets_prod =
9630                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9631         DOORBELL(bp, FP_IDX(fp), 0);
9632
9633         mmiowb();
9634
9635         fp->tx_bd_prod += nbd;
9636         dev->trans_start = jiffies;
9637
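        /* stop-then-recheck to close the race with bnx2x_tx_int() freeing
         * BDs on another CPU: if completions made room between the check
         * and the stop, wake the queue back up so TX does not stall */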
9638         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9639                 netif_stop_queue(dev);
9640                 bp->eth_stats.driver_xoff++;
9641                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9642                         netif_wake_queue(dev);
9643         }
9644         fp->tx_pkt++;
9645
9646         return NETDEV_TX_OK;
9647 }
9648
9649 /* called with rtnl_lock */
9650 static int bnx2x_open(struct net_device *dev)
9651 {
9652         struct bnx2x *bp = netdev_priv(dev);
9653
9654         bnx2x_set_power_state(bp, PCI_D0);
9655
9656         return bnx2x_nic_load(bp, LOAD_OPEN);
9657 }
9658
9659 /* called with rtnl_lock */
9660 static int bnx2x_close(struct net_device *dev)
9661 {
9662         struct bnx2x *bp = netdev_priv(dev);
9663
9664         /* Unload the driver, release IRQs */
9665         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9666         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9667                 if (!CHIP_REV_IS_SLOW(bp))
9668                         bnx2x_set_power_state(bp, PCI_D3hot);
9669
9670         return 0;
9671 }
9672
9673 /* called with netif_tx_lock from set_multicast */
9674 static void bnx2x_set_rx_mode(struct net_device *dev)
9675 {
9676         struct bnx2x *bp = netdev_priv(dev);
9677         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9678         int port = BP_PORT(bp);
9679
9680         if (bp->state != BNX2X_STATE_OPEN) {
9681                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9682                 return;
9683         }
9684
9685         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9686
9687         if (dev->flags & IFF_PROMISC)
9688                 rx_mode = BNX2X_RX_MODE_PROMISC;
9689
9690         else if ((dev->flags & IFF_ALLMULTI) ||
9691                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9692                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9693
9694         else { /* some multicasts */
9695                 if (CHIP_IS_E1(bp)) {
9696                         int i, old, offset;
9697                         struct dev_mc_list *mclist;
9698                         struct mac_configuration_cmd *config =
9699                                                 bnx2x_sp(bp, mcast_config);
9700
9701                         for (i = 0, mclist = dev->mc_list;
9702                              mclist && (i < dev->mc_count);
9703                              i++, mclist = mclist->next) {
9704
9705                                 config->config_table[i].
9706                                         cam_entry.msb_mac_addr =
9707                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9708                                 config->config_table[i].
9709                                         cam_entry.middle_mac_addr =
9710                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9711                                 config->config_table[i].
9712                                         cam_entry.lsb_mac_addr =
9713                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9714                                 config->config_table[i].cam_entry.flags =
9715                                                         cpu_to_le16(port);
9716                                 config->config_table[i].
9717                                         target_table_entry.flags = 0;
9718                                 config->config_table[i].
9719                                         target_table_entry.client_id = 0;
9720                                 config->config_table[i].
9721                                         target_table_entry.vlan_id = 0;
9722
9723                                 DP(NETIF_MSG_IFUP,
9724                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9725                                    config->config_table[i].
9726                                                 cam_entry.msb_mac_addr,
9727                                    config->config_table[i].
9728                                                 cam_entry.middle_mac_addr,
9729                                    config->config_table[i].
9730                                                 cam_entry.lsb_mac_addr);
9731                         }
9732                         old = config->hdr.length_6b;
9733                         if (old > i) {
9734                                 for (; i < old; i++) {
9735                                         if (CAM_IS_INVALID(config->
9736                                                            config_table[i])) {
9737                                                 i--; /* already invalidated */
9738                                                 break;
9739                                         }
9740                                         /* invalidate */
9741                                         CAM_INVALIDATE(config->
9742                                                        config_table[i]);
9743                                 }
9744                         }
9745
9746                         if (CHIP_REV_IS_SLOW(bp))
9747                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9748                         else
9749                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9750
9751                         config->hdr.length_6b = i;
9752                         config->hdr.offset = offset;
9753                         config->hdr.client_id = BP_CL_ID(bp);
9754                         config->hdr.reserved1 = 0;
9755
9756                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9757                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9758                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9759                                       0);
9760                 } else { /* E1H */
9761                         /* Accept one or more multicasts */
9762                         struct dev_mc_list *mclist;
9763                         u32 mc_filter[MC_HASH_SIZE];
9764                         u32 crc, bit, regidx;
9765                         int i;
9766
9767                         memset(mc_filter, 0, sizeof(mc_filter));
9768
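                        /* build a 256-bit hash filter from the top byte of
                         * each MAC's crc32c; e.g. (hypothetical) a hash byte
                         * of 0x5a sets bit (0x5a & 0x1f) = 26 in
                         * mc_filter[0x5a >> 5] = mc_filter[2]; the registers
                         * written below hold 32 hash bits each */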
9769                         for (i = 0, mclist = dev->mc_list;
9770                              mclist && (i < dev->mc_count);
9771                              i++, mclist = mclist->next) {
9772
9773                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9774                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9775                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9776                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9777                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9778
9779                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9780                                 bit = (crc >> 24) & 0xff;
9781                                 regidx = bit >> 5;
9782                                 bit &= 0x1f;
9783                                 mc_filter[regidx] |= (1 << bit);
9784                         }
9785
9786                         for (i = 0; i < MC_HASH_SIZE; i++)
9787                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9788                                        mc_filter[i]);
9789                 }
9790         }
9791
9792         bp->rx_mode = rx_mode;
9793         bnx2x_set_storm_rx_mode(bp);
9794 }
9795
9796 /* called with rtnl_lock */
9797 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9798 {
9799         struct sockaddr *addr = p;
9800         struct bnx2x *bp = netdev_priv(dev);
9801
9802         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9803                 return -EINVAL;
9804
9805         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9806         if (netif_running(dev)) {
9807                 if (CHIP_IS_E1(bp))
9808                         bnx2x_set_mac_addr_e1(bp);
9809                 else
9810                         bnx2x_set_mac_addr_e1h(bp);
9811         }
9812
9813         return 0;
9814 }
9815
9816 /* called with rtnl_lock */
9817 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9818 {
9819         struct mii_ioctl_data *data = if_mii(ifr);
9820         struct bnx2x *bp = netdev_priv(dev);
9821         int err;
9822
9823         switch (cmd) {
9824         case SIOCGMIIPHY:
9825                 data->phy_id = bp->port.phy_addr;
9826
9827                 /* fallthrough */
9828
9829         case SIOCGMIIREG: {
9830                 u16 mii_regval;
9831
9832                 if (!netif_running(dev))
9833                         return -EAGAIN;
9834
9835                 mutex_lock(&bp->port.phy_mutex);
9836                 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9837                                       DEFAULT_PHY_DEV_ADDR,
9838                                       (data->reg_num & 0x1f), &mii_regval);
9839                 data->val_out = mii_regval;
9840                 mutex_unlock(&bp->port.phy_mutex);
9841                 return err;
9842         }
9843
9844         case SIOCSMIIREG:
9845                 if (!capable(CAP_NET_ADMIN))
9846                         return -EPERM;
9847
9848                 if (!netif_running(dev))
9849                         return -EAGAIN;
9850
9851                 mutex_lock(&bp->port.phy_mutex);
9852                 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9853                                        DEFAULT_PHY_DEV_ADDR,
9854                                        (data->reg_num & 0x1f), data->val_in);
9855                 mutex_unlock(&bp->port.phy_mutex);
9856                 return err;
9857
9858         default:
9859                 /* do nothing */
9860                 break;
9861         }
9862
9863         return -EOPNOTSUPP;
9864 }
9865
9866 /* called with rtnl_lock */
9867 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9868 {
9869         struct bnx2x *bp = netdev_priv(dev);
9870         int rc = 0;
9871
9872         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9873             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9874                 return -EINVAL;
9875
9876         /* This does not race with packet allocation
9877          * because the actual alloc size is
9878          * only updated as part of load
9879          */
9880         dev->mtu = new_mtu;
9881
9882         if (netif_running(dev)) {
9883                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9884                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9885         }
9886
9887         return rc;
9888 }
9889
9890 static void bnx2x_tx_timeout(struct net_device *dev)
9891 {
9892         struct bnx2x *bp = netdev_priv(dev);
9893
9894 #ifdef BNX2X_STOP_ON_ERROR
9895         if (!bp->panic)
9896                 bnx2x_panic();
9897 #endif
9898         /* This allows the netif to be shut down gracefully before resetting */
9899         schedule_work(&bp->reset_task);
9900 }
9901
9902 #ifdef BCM_VLAN
9903 /* called with rtnl_lock */
9904 static void bnx2x_vlan_rx_register(struct net_device *dev,
9905                                    struct vlan_group *vlgrp)
9906 {
9907         struct bnx2x *bp = netdev_priv(dev);
9908
9909         bp->vlgrp = vlgrp;
9910         if (netif_running(dev))
9911                 bnx2x_set_client_config(bp);
9912 }
9913
9914 #endif
9915
9916 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9917 static void poll_bnx2x(struct net_device *dev)
9918 {
9919         struct bnx2x *bp = netdev_priv(dev);
9920
9921         disable_irq(bp->pdev->irq);
9922         bnx2x_interrupt(bp->pdev->irq, dev);
9923         enable_irq(bp->pdev->irq);
9924 }
9925 #endif
9926
9927 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9928                                     struct net_device *dev)
9929 {
9930         struct bnx2x *bp;
9931         int rc;
9932
9933         SET_NETDEV_DEV(dev, &pdev->dev);
9934         bp = netdev_priv(dev);
9935
9936         bp->dev = dev;
9937         bp->pdev = pdev;
9938         bp->flags = 0;
9939         bp->func = PCI_FUNC(pdev->devfn);
9940
9941         rc = pci_enable_device(pdev);
9942         if (rc) {
9943                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9944                 goto err_out;
9945         }
9946
9947         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9948                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9949                        " aborting\n");
9950                 rc = -ENODEV;
9951                 goto err_out_disable;
9952         }
9953
9954         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9955                 printk(KERN_ERR PFX "Cannot find second PCI device"
9956                        " base address, aborting\n");
9957                 rc = -ENODEV;
9958                 goto err_out_disable;
9959         }
9960
9961         if (atomic_read(&pdev->enable_cnt) == 1) {
9962                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9963                 if (rc) {
9964                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9965                                " aborting\n");
9966                         goto err_out_disable;
9967                 }
9968
9969                 pci_set_master(pdev);
9970                 pci_save_state(pdev);
9971         }
9972
9973         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9974         if (bp->pm_cap == 0) {
9975                 printk(KERN_ERR PFX "Cannot find power management"
9976                        " capability, aborting\n");
9977                 rc = -EIO;
9978                 goto err_out_release;
9979         }
9980
9981         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9982         if (bp->pcie_cap == 0) {
9983                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9984                        " aborting\n");
9985                 rc = -EIO;
9986                 goto err_out_release;
9987         }
9988
9989         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9990                 bp->flags |= USING_DAC_FLAG;
9991                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9992                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9993                                " failed, aborting\n");
9994                         rc = -EIO;
9995                         goto err_out_release;
9996                 }
9997
9998         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9999                 printk(KERN_ERR PFX "System does not support DMA,"
10000                        " aborting\n");
10001                 rc = -EIO;
10002                 goto err_out_release;
10003         }
10004
10005         dev->mem_start = pci_resource_start(pdev, 0);
10006         dev->base_addr = dev->mem_start;
10007         dev->mem_end = pci_resource_end(pdev, 0);
10008
10009         dev->irq = pdev->irq;
10010
10011         bp->regview = ioremap_nocache(dev->base_addr,
10012                                       pci_resource_len(pdev, 0));
10013         if (!bp->regview) {
10014                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10015                 rc = -ENOMEM;
10016                 goto err_out_release;
10017         }
10018
10019         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10020                                         min_t(u64, BNX2X_DB_SIZE,
10021                                               pci_resource_len(pdev, 2)));
10022         if (!bp->doorbells) {
10023                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10024                 rc = -ENOMEM;
10025                 goto err_out_unmap;
10026         }
10027
10028         bnx2x_set_power_state(bp, PCI_D0);
10029
10030         /* clean indirect addresses */
10031         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10032                                PCICFG_VENDOR_ID_OFFSET);
10033         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10034         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10035         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10036         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10037
10038         dev->hard_start_xmit = bnx2x_start_xmit;
10039         dev->watchdog_timeo = TX_TIMEOUT;
10040
10041         dev->ethtool_ops = &bnx2x_ethtool_ops;
10042         dev->open = bnx2x_open;
10043         dev->stop = bnx2x_close;
10044         dev->set_multicast_list = bnx2x_set_rx_mode;
10045         dev->set_mac_address = bnx2x_change_mac_addr;
10046         dev->do_ioctl = bnx2x_ioctl;
10047         dev->change_mtu = bnx2x_change_mtu;
10048         dev->tx_timeout = bnx2x_tx_timeout;
10049 #ifdef BCM_VLAN
10050         dev->vlan_rx_register = bnx2x_vlan_rx_register;
10051 #endif
10052 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10053         dev->poll_controller = poll_bnx2x;
10054 #endif
10055         dev->features |= NETIF_F_SG;
10056         dev->features |= NETIF_F_HW_CSUM;
10057         if (bp->flags & USING_DAC_FLAG)
10058                 dev->features |= NETIF_F_HIGHDMA;
10059 #ifdef BCM_VLAN
10060         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10061 #endif
10062         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10063         dev->features |= NETIF_F_TSO6;
10064
10065         return 0;
10066
10067 err_out_unmap:
10068         if (bp->regview) {
10069                 iounmap(bp->regview);
10070                 bp->regview = NULL;
10071         }
10072         if (bp->doorbells) {
10073                 iounmap(bp->doorbells);
10074                 bp->doorbells = NULL;
10075         }
10076
10077 err_out_release:
10078         if (atomic_read(&pdev->enable_cnt) == 1)
10079                 pci_release_regions(pdev);
10080
10081 err_out_disable:
10082         pci_disable_device(pdev);
10083         pci_set_drvdata(pdev, NULL);
10084
10085 err_out:
10086         return rc;
10087 }
10088
10089 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10090 {
10091         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10092
10093         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10094         return val;
10095 }
10096
10097 /* return value: 1=2.5GHz, 2=5GHz */
10098 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10099 {
10100         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10101
10102         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10103         return val;
10104 }
10105
10106 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10107                                     const struct pci_device_id *ent)
10108 {
10109         static int version_printed;
10110         struct net_device *dev = NULL;
10111         struct bnx2x *bp;
10112         int rc;
10113         DECLARE_MAC_BUF(mac);
10114
10115         if (version_printed++ == 0)
10116                 printk(KERN_INFO "%s", version);
10117
10118         /* dev and its private area are zeroed by alloc_etherdev */
10119         dev = alloc_etherdev(sizeof(*bp));
10120         if (!dev) {
10121                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10122                 return -ENOMEM;
10123         }
10124
10125         netif_carrier_off(dev);
10126
10127         bp = netdev_priv(dev);
10128         bp->msglevel = debug;
10129
10130         rc = bnx2x_init_dev(pdev, dev);
10131         if (rc < 0) {
10132                 free_netdev(dev);
10133                 return rc;
10134         }
10135
10136         rc = register_netdev(dev);
10137         if (rc) {
10138                 dev_err(&pdev->dev, "Cannot register net device\n");
10139                 goto init_one_exit;
10140         }
10141
10142         pci_set_drvdata(pdev, dev);
10143
10144         rc = bnx2x_init_bp(bp);
10145         if (rc) {
10146                 unregister_netdev(dev);
10147                 goto init_one_exit;
10148         }
10149
10150         bp->common.name = board_info[ent->driver_data].name;
10151         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10152                " IRQ %d, ", dev->name, bp->common.name,
10153                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10154                bnx2x_get_pcie_width(bp),
10155                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10156                dev->base_addr, bp->pdev->irq);
10157         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10158         return 0;
10159
10160 init_one_exit:
10161         if (bp->regview)
10162                 iounmap(bp->regview);
10163
10164         if (bp->doorbells)
10165                 iounmap(bp->doorbells);
10166
10167         free_netdev(dev);
10168
10169         if (atomic_read(&pdev->enable_cnt) == 1)
10170                 pci_release_regions(pdev);
10171
10172         pci_disable_device(pdev);
10173         pci_set_drvdata(pdev, NULL);
10174
10175         return rc;
10176 }
10177
10178 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10179 {
10180         struct net_device *dev = pci_get_drvdata(pdev);
10181         struct bnx2x *bp;
10182
10183         if (!dev) {
10184                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10185                 return;
10186         }
10187         bp = netdev_priv(dev);
10188
10189         unregister_netdev(dev);
10190
10191         if (bp->regview)
10192                 iounmap(bp->regview);
10193
10194         if (bp->doorbells)
10195                 iounmap(bp->doorbells);
10196
10197         free_netdev(dev);
10198
10199         if (atomic_read(&pdev->enable_cnt) == 1)
10200                 pci_release_regions(pdev);
10201
10202         pci_disable_device(pdev);
10203         pci_set_drvdata(pdev, NULL);
10204 }
10205
10206 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10207 {
10208         struct net_device *dev = pci_get_drvdata(pdev);
10209         struct bnx2x *bp;
10210
10211         if (!dev) {
10212                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10213                 return -ENODEV;
10214         }
10215         bp = netdev_priv(dev);
10216
10217         rtnl_lock();
10218
10219         pci_save_state(pdev);
10220
10221         if (!netif_running(dev)) {
10222                 rtnl_unlock();
10223                 return 0;
10224         }
10225
10226         netif_device_detach(dev);
10227
10228         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10229
10230         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10231
10232         rtnl_unlock();
10233
10234         return 0;
10235 }
10236
10237 static int bnx2x_resume(struct pci_dev *pdev)
10238 {
10239         struct net_device *dev = pci_get_drvdata(pdev);
10240         struct bnx2x *bp;
10241         int rc;
10242
10243         if (!dev) {
10244                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10245                 return -ENODEV;
10246         }
10247         bp = netdev_priv(dev);
10248
10249         rtnl_lock();
10250
10251         pci_restore_state(pdev);
10252
10253         if (!netif_running(dev)) {
10254                 rtnl_unlock();
10255                 return 0;
10256         }
10257
10258         bnx2x_set_power_state(bp, PCI_D0);
10259         netif_device_attach(dev);
10260
10261         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10262
10263         rtnl_unlock();
10264
10265         return rc;
10266 }
10267
10268 /**
10269  * bnx2x_io_error_detected - called when PCI error is detected
10270  * @pdev: Pointer to PCI device
10271  * @state: The current pci connection state
10272  *
10273  * This function is called after a PCI bus error affecting
10274  * this device has been detected.
10275  */
10276 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10277                                                 pci_channel_state_t state)
10278 {
10279         struct net_device *dev = pci_get_drvdata(pdev);
10280         struct bnx2x *bp = netdev_priv(dev);
10281
10282         rtnl_lock();
10283
10284         netif_device_detach(dev);
10285
10286         if (netif_running(dev))
10287                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10288
10289         pci_disable_device(pdev);
10290
10291         rtnl_unlock();
10292
10293         /* Request a slot reset */
10294         return PCI_ERS_RESULT_NEED_RESET;
10295 }
10296
10297 /**
10298  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10299  * @pdev: Pointer to PCI device
10300  *
10301  * Restart the card from scratch, as if from a cold boot.
10302  */
10303 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10304 {
10305         struct net_device *dev = pci_get_drvdata(pdev);
10306         struct bnx2x *bp = netdev_priv(dev);
10307
10308         rtnl_lock();
10309
10310         if (pci_enable_device(pdev)) {
10311                 dev_err(&pdev->dev,
10312                         "Cannot re-enable PCI device after reset\n");
10313                 rtnl_unlock();
10314                 return PCI_ERS_RESULT_DISCONNECT;
10315         }
10316
10317         pci_set_master(pdev);
10318         pci_restore_state(pdev);
10319
10320         if (netif_running(dev))
10321                 bnx2x_set_power_state(bp, PCI_D0);
10322
10323         rtnl_unlock();
10324
10325         return PCI_ERS_RESULT_RECOVERED;
10326 }
10327
10328 /**
10329  * bnx2x_io_resume - called when traffic can start flowing again
10330  * @pdev: Pointer to PCI device
10331  *
10332  * This callback is called when the error recovery driver tells us that
10333  * it's OK to resume normal operation.
10334  */
10335 static void bnx2x_io_resume(struct pci_dev *pdev)
10336 {
10337         struct net_device *dev = pci_get_drvdata(pdev);
10338         struct bnx2x *bp = netdev_priv(dev);
10339
10340         rtnl_lock();
10341
10342         if (netif_running(dev))
10343                 bnx2x_nic_load(bp, LOAD_OPEN);
10344
10345         netif_device_attach(dev);
10346
10347         rtnl_unlock();
10348 }
10349
10350 static struct pci_error_handlers bnx2x_err_handler = {
10351         .error_detected = bnx2x_io_error_detected,
10352         .slot_reset = bnx2x_io_slot_reset,
10353         .resume = bnx2x_io_resume,
10354 };
10355
10356 static struct pci_driver bnx2x_pci_driver = {
10357         .name        = DRV_MODULE_NAME,
10358         .id_table    = bnx2x_pci_tbl,
10359         .probe       = bnx2x_init_one,
10360         .remove      = __devexit_p(bnx2x_remove_one),
10361         .suspend     = bnx2x_suspend,
10362         .resume      = bnx2x_resume,
10363         .err_handler = &bnx2x_err_handler,
10364 };
10365
10366 static int __init bnx2x_init(void)
10367 {
10368         return pci_register_driver(&bnx2x_pci_driver);
10369 }
10370
10371 static void __exit bnx2x_cleanup(void)
10372 {
10373         pci_unregister_driver(&bnx2x_pci_driver);
10374 }
10375
10376 module_init(bnx2x_init);
10377 module_exit(bnx2x_cleanup);
10378